Diffstat (limited to 'drivers/gpu/drm')
1030 files changed, 59020 insertions, 28333 deletions
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index e67c194c2aca..1168351267fd 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -93,6 +93,20 @@ config DRM_KMS_FB_HELPER
 	help
 	  FBDEV helpers for KMS drivers.
 
+config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
+	bool "Enable refcount backtrace history in the DP MST helpers"
+	select STACKDEPOT
+	depends on DRM_KMS_HELPER
+	depends on DEBUG_KERNEL
+	depends on EXPERT
+	help
+	  Enables debug tracing for topology refs in DRM's DP MST helpers. A
+	  history of each topology reference/dereference will be printed to the
+	  kernel log once a port or branch device's topology refcount reaches 0.
+
+	  This has the potential to use a lot of memory and print some very
+	  large kernel messages. If in doubt, say "N".
+
 config DRM_FBDEV_EMULATION
 	bool "Enable legacy fbdev support for your modesetting driver"
 	depends on DRM
@@ -165,13 +179,26 @@ config DRM_TTM
 	  GPU memory types. Will be enabled automatically if a device driver
 	  uses it.
 
+config DRM_TTM_DMA_PAGE_POOL
+	bool
+	depends on DRM_TTM && (SWIOTLB || INTEL_IOMMU)
+	default y
+	help
+	  Choose this if you need the TTM dma page pool
+
 config DRM_VRAM_HELPER
 	tristate
 	depends on DRM
-	select DRM_TTM
 	help
 	  Helpers for VRAM memory management
 
+config DRM_TTM_HELPER
+	tristate
+	depends on DRM
+	select DRM_TTM
+	help
+	  Helpers for ttm-based gem objects
+
 config DRM_GEM_CMA_HELPER
 	bool
 	depends on DRM
@@ -226,9 +253,9 @@ config DRM_AMDGPU
 	tristate "AMD GPU"
 	depends on DRM && PCI && MMU
 	select FW_LOADER
-	select DRM_KMS_HELPER
+	select DRM_KMS_HELPER
 	select DRM_SCHED
-	select DRM_TTM
+	select DRM_TTM
 	select POWER_SUPPLY
 	select HWMON
 	select BACKLIGHT_CLASS_DEVICE
@@ -257,6 +284,7 @@ config DRM_VKMS
 	tristate "Virtual KMS (EXPERIMENTAL)"
 	depends on DRM
 	select DRM_KMS_HELPER
+	select CRC32
 	default n
 	help
 	  Virtual Kernel Mode-Setting (VKMS) is used for testing or for
@@ -397,7 +425,7 @@ config DRM_R128
 
 config DRM_I810
 	tristate "Intel I810"
-	# !PREEMPT because of missing ioctl locking
+	# !PREEMPTION because of missing ioctl locking
 	depends on DRM && AGP && AGP_INTEL && (!PREEMPTION || BROKEN)
 	help
 	  Choose this option if you have an Intel I810 graphics card.
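As a rough, illustrative sketch only (not part of the patch): a kernel .config fragment that satisfies the dependencies of the new DRM_DEBUG_DP_MST_TOPOLOGY_REFS option above could look like the following. STACKDEPOT is selected automatically by the option itself, and DRM_KMS_HELPER is normally selected by whichever KMS driver is enabled rather than set by hand.

CONFIG_EXPERT=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DRM=y
CONFIG_DRM_KMS_HELPER=y
CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS=y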
	  If M is
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 82ff826b33cc..9f1c7c486f88 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -33,10 +33,12 @@ drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
 drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
 
 drm_vram_helper-y := drm_gem_vram_helper.o \
-		     drm_vram_helper_common.o \
-		     drm_vram_mm_helper.o
+		     drm_vram_helper_common.o
 obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
 
+drm_ttm_helper-y := drm_gem_ttm_helper.o
+obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
+
 drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
		drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
		drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 00962a659009..ca0e435559d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -53,8 +53,9 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
 	amdgpu_ucode.o amdgpu_bo_list.o amdgpu_ctx.o amdgpu_sync.o \
 	amdgpu_gtt_mgr.o amdgpu_vram_mgr.o amdgpu_virt.o amdgpu_atomfirmware.o \
 	amdgpu_vf_error.o amdgpu_sched.o amdgpu_debugfs.o amdgpu_ids.o \
-	amdgpu_gmc.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
-	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o smu_v11_0_i2c.o
+	amdgpu_gmc.o amdgpu_mmhub.o amdgpu_xgmi.o amdgpu_csa.o amdgpu_ras.o amdgpu_vm_cpu.o \
+	amdgpu_vm_sdma.o amdgpu_discovery.o amdgpu_ras_eeprom.o amdgpu_nbio.o \
+	amdgpu_umc.o smu_v11_0_i2c.o
 
 amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
 
@@ -67,7 +68,7 @@ amdgpu-$(CONFIG_DRM_AMDGPU_SI)+= si.o gmc_v6_0.o gfx_v6_0.o si_ih.o si_dma.o dce
 amdgpu-y += \
 	vi.o mxgpu_vi.o nbio_v6_1.o soc15.o emu_soc.o mxgpu_ai.o nbio_v7_0.o vega10_reg_init.o \
 	vega20_reg_init.o nbio_v7_4.o nbio_v2_3.o nv.o navi10_reg_init.o navi14_reg_init.o \
-	arct_reg_init.o navi12_reg_init.o
+	arct_reg_init.o navi12_reg_init.o mxgpu_nv.o
 
 # add DF block
 amdgpu-y += \
@@ -83,7 +84,7 @@ amdgpu-y += \
 
 # add UMC block
 amdgpu-y += \
-	umc_v6_1.o
+	umc_v6_1.o umc_v6_0.o
 
 # add IH block
 amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index bd37df5dd6d0..bcc5d40a8d5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -73,6 +73,7 @@
 #include "amdgpu_gmc.h"
 #include "amdgpu_gfx.h"
 #include "amdgpu_sdma.h"
+#include "amdgpu_nbio.h"
 #include "amdgpu_dm.h"
 #include "amdgpu_virt.h"
 #include "amdgpu_csa.h"
@@ -106,6 +107,8 @@ struct amdgpu_mgpu_info
 	uint32_t			num_apu;
 };
 
+#define AMDGPU_MAX_TIMEOUT_PARAM_LENGTH	256
+
 /*
  * Modules parameters.
*/ @@ -122,6 +125,7 @@ extern int amdgpu_disp_priority; extern int amdgpu_hw_i2c; extern int amdgpu_pcie_gen2; extern int amdgpu_msi; +extern char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH]; extern int amdgpu_dpm; extern int amdgpu_fw_load_type; extern int amdgpu_aspm; @@ -135,6 +139,7 @@ extern int amdgpu_vm_fragment_size; extern int amdgpu_vm_fault_stop; extern int amdgpu_vm_debug; extern int amdgpu_vm_update_mode; +extern int amdgpu_exp_hw_support; extern int amdgpu_dc; extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; @@ -146,11 +151,7 @@ extern uint amdgpu_sdma_phase_quantum; extern char *amdgpu_disable_cu; extern char *amdgpu_virtual_display; extern uint amdgpu_pp_feature_mask; -extern int amdgpu_ngg; -extern int amdgpu_prim_buf_per_se; -extern int amdgpu_pos_buf_per_se; -extern int amdgpu_cntl_sb_buf_per_se; -extern int amdgpu_param_buf_per_se; +extern uint amdgpu_force_long_training; extern int amdgpu_job_hang_limit; extern int amdgpu_lbpw; extern int amdgpu_compute_multipipe; @@ -167,6 +168,12 @@ extern int amdgpu_mcbp; extern int amdgpu_discovery; extern int amdgpu_mes; extern int amdgpu_noretry; +extern int amdgpu_force_asic_type; +#ifdef CONFIG_HSA_AMD +extern int sched_policy; +#else +static const int sched_policy = KFD_SCHED_POLICY_HWS; +#endif #ifdef CONFIG_DRM_AMDGPU_SI extern int amdgpu_si_support; @@ -283,6 +290,9 @@ struct amdgpu_ip_block_version { const struct amd_ip_funcs *funcs; }; +#define HW_REV(_Major, _Minor, _Rev) \ + ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev))) + struct amdgpu_ip_block { struct amdgpu_ip_block_status status; const struct amdgpu_ip_block_version *version; @@ -425,7 +435,6 @@ struct amdgpu_fpriv { }; int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); -int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev); int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned size, struct amdgpu_ib *ib); @@ -477,7 +486,6 @@ struct amdgpu_cs_parser { uint64_t bytes_moved_vis_threshold; uint64_t bytes_moved; uint64_t bytes_moved_vis; - struct amdgpu_bo_list_entry *evictable; /* user fence */ struct amdgpu_bo_list_entry uf_entry; @@ -624,6 +632,11 @@ struct amdgpu_fw_vram_usage { u64 size; struct amdgpu_bo *reserved_bo; void *va; + + /* Offset on the top of VRAM, used as c2p write buffer. 
+ */ + u64 mem_train_fb_loc; + bool mem_train_support; }; /* @@ -644,71 +657,14 @@ typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t); typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); - -/* - * amdgpu nbio functions - * - */ -struct nbio_hdp_flush_reg { - u32 ref_and_mask_cp0; - u32 ref_and_mask_cp1; - u32 ref_and_mask_cp2; - u32 ref_and_mask_cp3; - u32 ref_and_mask_cp4; - u32 ref_and_mask_cp5; - u32 ref_and_mask_cp6; - u32 ref_and_mask_cp7; - u32 ref_and_mask_cp8; - u32 ref_and_mask_cp9; - u32 ref_and_mask_sdma0; - u32 ref_and_mask_sdma1; - u32 ref_and_mask_sdma2; - u32 ref_and_mask_sdma3; - u32 ref_and_mask_sdma4; - u32 ref_and_mask_sdma5; - u32 ref_and_mask_sdma6; - u32 ref_and_mask_sdma7; -}; - struct amdgpu_mmio_remap { u32 reg_offset; resource_size_t bus_addr; }; -struct amdgpu_nbio_funcs { - const struct nbio_hdp_flush_reg *hdp_flush_reg; - u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev); - u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev); - u32 (*get_pcie_index_offset)(struct amdgpu_device *adev); - u32 (*get_pcie_data_offset)(struct amdgpu_device *adev); - u32 (*get_rev_id)(struct amdgpu_device *adev); - void (*mc_access_enable)(struct amdgpu_device *adev, bool enable); - void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring); - u32 (*get_memsize)(struct amdgpu_device *adev); - void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance, - bool use_doorbell, int doorbell_index, int doorbell_size); - void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell, - int doorbell_index, int instance); - void (*enable_doorbell_aperture)(struct amdgpu_device *adev, - bool enable); - void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev, - bool enable); - void (*ih_doorbell_range)(struct amdgpu_device *adev, - bool use_doorbell, int doorbell_index); - void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, - bool enable); - void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev, - bool enable); - void (*get_clockgating_state)(struct amdgpu_device *adev, - u32 *flags); - void (*ih_control)(struct amdgpu_device *adev); - void (*init_registers)(struct amdgpu_device *adev); - void (*detect_hw_virt)(struct amdgpu_device *adev); - void (*remap_hdp_registers)(struct amdgpu_device *adev); -}; - struct amdgpu_df_funcs { void (*sw_init)(struct amdgpu_device *adev); + void (*sw_fini)(struct amdgpu_device *adev); void (*enable_broadcast_mode)(struct amdgpu_device *adev, bool enable); u32 (*get_fb_channel_number)(struct amdgpu_device *adev); @@ -813,6 +769,7 @@ struct amdgpu_device { uint8_t *bios; uint32_t bios_size; struct amdgpu_bo *stolen_vga_memory; + struct amdgpu_bo *discovery_memory; uint32_t bios_scratch_reg_offset; uint32_t bios_scratch[AMDGPU_BIOS_NUM_SCRATCH]; @@ -921,6 +878,12 @@ struct amdgpu_device { u32 cg_flags; u32 pg_flags; + /* nbio */ + struct amdgpu_nbio nbio; + + /* mmhub */ + struct amdgpu_mmhub mmhub; + /* gfx */ struct amdgpu_gfx gfx; @@ -974,9 +937,7 @@ struct amdgpu_device { /* soc15 register offset based on ip, instance and segment */ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; - const struct amdgpu_nbio_funcs *nbio_funcs; const struct amdgpu_df_funcs *df_funcs; - const struct amdgpu_mmhub_funcs *mmhub_funcs; /* delayed work_func for deferring clockgating during resume */ struct delayed_work delayed_init_work; @@ 
-1009,8 +970,6 @@ struct amdgpu_device { int asic_reset_res; struct work_struct xgmi_reset_work; - bool in_baco_reset; - long gfx_timeout; long sdma_timeout; long video_timeout; @@ -1018,6 +977,9 @@ struct amdgpu_device { uint64_t unique_id; uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS]; + + /* device pstate */ + int pstate; }; static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev) @@ -1032,6 +994,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, void amdgpu_device_fini(struct amdgpu_device *adev); int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); +void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, + uint32_t *buf, size_t size, bool write); uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t acc_flags); void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 07eb29885372..d3da9dde4ee1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -63,45 +63,10 @@ void amdgpu_amdkfd_fini(void) void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev) { - const struct kfd2kgd_calls *kfd2kgd; - - switch (adev->asic_type) { -#ifdef CONFIG_DRM_AMDGPU_CIK - case CHIP_KAVERI: - case CHIP_HAWAII: - kfd2kgd = amdgpu_amdkfd_gfx_7_get_functions(); - break; -#endif - case CHIP_CARRIZO: - case CHIP_TONGA: - case CHIP_FIJI: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - case CHIP_VEGAM: - kfd2kgd = amdgpu_amdkfd_gfx_8_0_get_functions(); - break; - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - kfd2kgd = amdgpu_amdkfd_gfx_9_0_get_functions(); - break; - case CHIP_ARCTURUS: - kfd2kgd = amdgpu_amdkfd_arcturus_get_functions(); - break; - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - kfd2kgd = amdgpu_amdkfd_gfx_10_0_get_functions(); - break; - default: - dev_info(adev->dev, "kfd not supported on this ASIC\n"); - return; - } + bool vf = amdgpu_sriov_vf(adev); adev->kfd.dev = kgd2kfd_probe((struct kgd_dev *)adev, - adev->pdev, kfd2kgd); + adev->pdev, adev->asic_type, vf); if (adev->kfd.dev) amdgpu_amdkfd_total_mem_size += adev->gmc.real_vram_size; @@ -165,14 +130,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) adev->gfx.mec.queue_bitmap, KGD_MAX_QUEUES); - /* remove the KIQ bit as well */ - if (adev->gfx.kiq.ring.sched.ready) - clear_bit(amdgpu_gfx_mec_queue_to_bit(adev, - adev->gfx.kiq.ring.me - 1, - adev->gfx.kiq.ring.pipe, - adev->gfx.kiq.ring.queue), - gpu_resources.queue_bitmap); - /* According to linux/bitmap.h we shouldn't use bitmap_clear if * nbits is not compile time constant */ @@ -202,7 +159,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) adev->doorbell_index.last_non_cp; } - kgd2kfd_device_init(adev->kfd.dev, &gpu_resources); + kgd2kfd_device_init(adev->kfd.dev, adev->ddev, &gpu_resources); } } @@ -709,38 +666,14 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) return 0; } -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) -{ - return NULL; -} - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) -{ - return NULL; -} - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) -{ - return NULL; -} - -struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void) -{ - return NULL; -} - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void) -{ - return NULL; -} 
- struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev, - const struct kfd2kgd_calls *f2g) + unsigned int asic_type, bool vf) { return NULL; } bool kgd2kfd_device_init(struct kfd_dev *kfd, + struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources) { return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index e519df3fd2b6..069d5d230810 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -57,7 +57,7 @@ struct kgd_mem { unsigned int mapped_to_gpu_memory; uint64_t va; - uint32_t mapping_flags; + uint32_t alloc_flags; atomic_t invalid; struct amdkfd_process_info *process_info; @@ -137,12 +137,6 @@ int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine, void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle); bool amdgpu_amdkfd_have_atomics_support(struct kgd_dev *kgd); -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void); -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void); -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void); -struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void); -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions(void); - bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev); @@ -179,10 +173,17 @@ uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd); uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd); uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src); +/* Read user wptr from a specified user address space with page fault + * disabled. The memory must be pinned and mapped to the hardware when + * this is called in hqd_load functions, so it should never fault in + * the first place. This resolves a circular lock dependency involving + * four locks, including the DQM lock and mmap_sem. + */ #define read_user_wptr(mmptr, wptr, dst) \ ({ \ bool valid = false; \ if ((mmptr) && (wptr)) { \ + pagefault_disable(); \ if ((mmptr) == current->mm) { \ valid = !get_user((dst), (wptr)); \ } else if (current->mm == NULL) { \ @@ -190,6 +191,7 @@ uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *s valid = !get_user((dst), (wptr)); \ unuse_mm(mmptr); \ } \ + pagefault_enable(); \ } \ valid; \ }) @@ -240,8 +242,9 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo); int kgd2kfd_init(void); void kgd2kfd_exit(void); struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, struct pci_dev *pdev, - const struct kfd2kgd_calls *f2g); + unsigned int asic_type, bool vf); bool kgd2kfd_device_init(struct kfd_dev *kfd, + struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources); void kgd2kfd_device_exit(struct kfd_dev *kfd); void kgd2kfd_suspend(struct kfd_dev *kfd); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index c79aaebeeaf0..b6713e0ed1b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -19,10 +19,6 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ - -#undef pr_fmt -#define pr_fmt(fmt) "kfd2kgd: " fmt - #include <linux/module.h> #include <linux/fdtable.h> #include <linux/uaccess.h> @@ -69,11 +65,11 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) return (struct v9_sdma_mqd *)mqd; } -static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t base[8] = { + uint32_t sdma_engine_reg_base[8] = { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, SOC15_REG_OFFSET(SDMA1, 0, @@ -91,111 +87,82 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, SOC15_REG_OFFSET(SDMA7, 0, mmSDMA7_RLC0_RB_CNTL) - mmSDMA7_RLC0_RB_CNTL }; - uint32_t retval; - retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL - - mmSDMA0_RLC0_RB_CNTL); + uint32_t retval = sdma_engine_reg_base[engine_id] + + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); - pr_debug("sdma base address: 0x%x\n", retval); + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); return retval; } -static u32 sdma_v4_0_get_reg_offset(struct amdgpu_device *adev, - u32 instance, u32 offset) -{ - switch (instance) { - case 0: - return (adev->reg_offset[SDMA0_HWIP][0][0] + offset); - case 1: - return (adev->reg_offset[SDMA1_HWIP][0][1] + offset); - case 2: - return (adev->reg_offset[SDMA2_HWIP][0][1] + offset); - case 3: - return (adev->reg_offset[SDMA3_HWIP][0][1] + offset); - case 4: - return (adev->reg_offset[SDMA4_HWIP][0][1] + offset); - case 5: - return (adev->reg_offset[SDMA5_HWIP][0][1] + offset); - case 6: - return (adev->reg_offset[SDMA6_HWIP][0][1] + offset); - case 7: - return (adev->reg_offset[SDMA7_HWIP][0][1] + offset); - default: - break; - } - return 0; -} - static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, uint32_t __user *wptr, struct mm_struct *mm) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr, sdmax_gfx_context_cntl; + uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; uint32_t data; uint64_t data64; uint64_t __user *wptr64 = (uint64_t __user *)wptr; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdmax_gfx_context_cntl = sdma_v4_0_get_reg_offset(adev, - m->sdma_engine_id, mmSDMA0_GFX_CONTEXT_CNTL); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - data = RREG32(sdmax_gfx_context_cntl); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(sdmax_gfx_context_cntl, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET, m->sdmax_rlcx_doorbell_offset); data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + 
mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI, m->sdmax_rlcx_rb_rptr_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); if (read_user_wptr(mm, wptr64, data64)) { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, lower_32_bits(data64)); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, upper_32_bits(data64)); } else { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, m->sdmax_rlcx_rb_rptr_hi); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdmax_rlcx_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdmax_rlcx_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdmax_rlcx_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -205,7 +172,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, uint32_t (**dump)[2], uint32_t *n_regs) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id); + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); uint32_t i = 0, reg; #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) @@ -215,15 +183,15 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, return -ENOMEM; for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -235,14 +203,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); 
+ sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -255,40 +223,42 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); m->sdmax_rlcx_rb_rptr_hi = - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI); + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI); return 0; } -static const struct kfd2kgd_calls kfd2kgd = { +const struct kfd2kgd_calls arcturus_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, .init_interrupts = kgd_gfx_v9_init_interrupts, @@ -304,20 +274,11 @@ static const struct kfd2kgd_calls kfd2kgd = { .address_watch_execute = kgd_gfx_v9_address_watch_execute, .wave_control_execute = kgd_gfx_v9_wave_control_execute, .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va, + .get_atc_vmid_pasid_mapping_info = + kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .get_tile_config = kgd_gfx_v9_get_tile_config, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, .get_hive_id = amdgpu_amdkfd_get_hive_id, }; - -struct kfd2kgd_calls *amdgpu_amdkfd_arcturus_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index d10f483f5e27..61cd707158e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -19,18 +19,9 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE 
SOFTWARE. */ -#undef pr_fmt -#define pr_fmt(fmt) "kfd2kgd: " fmt - -#include <linux/module.h> -#include <linux/fdtable.h> -#include <linux/uaccess.h> -#include <linux/firmware.h> #include <linux/mmu_context.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" -#include "amdgpu_ucode.h" -#include "soc15_hw_ip.h" #include "gc/gc_10_1_0_offset.h" #include "gc/gc_10_1_0_sh_mask.h" #include "navi10_enum.h" @@ -42,6 +33,7 @@ #include "v10_structs.h" #include "nv.h" #include "nvd.h" +#include "gfxhub_v2_0.h" enum hqd_dequeue_request_type { NO_ACTION = 0, @@ -50,63 +42,6 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -/* - * Register access functions - */ - -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, - uint32_t sh_mem_config, - uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases); -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, - unsigned int vmid); -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); -static int kgd_hqd_dump(struct kgd_dev *kgd, - uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, - uint32_t __user *wptr, struct mm_struct *mm); -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, - uint32_t engine_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, - enum kfd_preempt_type reset_type, - unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id); -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int utimeout); -#if 0 -static uint32_t get_watch_base_addr(struct amdgpu_device *adev); -#endif -static int kgd_address_watch_disable(struct kgd_dev *kgd); -static int kgd_address_watch_execute(struct kgd_dev *kgd, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo); -static int kgd_wave_control_execute(struct kgd_dev *kgd, - uint32_t gfx_index_val, - uint32_t sq_cmd); -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, - unsigned int watch_point_id, - unsigned int reg_offset); - -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid); -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base); -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); - /* Because of REG_GET_FIELD() being used, we put this function in the * asic specific file. 
*/ @@ -139,37 +74,6 @@ static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd, return 0; } -static const struct kfd2kgd_calls kfd2kgd = { - .program_sh_mem_settings = kgd_program_sh_mem_settings, - .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, - .init_interrupts = kgd_init_interrupts, - .hqd_load = kgd_hqd_load, - .hqd_sdma_load = kgd_hqd_sdma_load, - .hqd_dump = kgd_hqd_dump, - .hqd_sdma_dump = kgd_hqd_sdma_dump, - .hqd_is_occupied = kgd_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, - .hqd_destroy = kgd_hqd_destroy, - .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, - .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - get_atc_vmid_pasid_mapping_valid, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, - .set_vm_context_page_table_base = set_vm_context_page_table_base, - .get_tile_config = amdgpu_amdkfd_get_tile_config, -}; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_10_0_get_functions() -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -250,11 +154,6 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, ATC_VMID0_PASID_MAPPING__VALID_MASK; pr_debug("pasid 0x%x vmid %d, reg value %x\n", pasid, vmid, pasid_mapping); - /* - * need to do this twice, once for gfx and once for mmhub - * for ATC add 16 to VMID for mmhub, for IH different registers. - * ATC_VMID0..15 registers are separate from ATC_VMID16..31. 
- */ pr_debug("ATHUB, reg %x\n", SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); WREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid, @@ -306,11 +205,11 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) return 0; } -static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t base[2] = { + uint32_t sdma_engine_reg_base[2] = { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, /* On gfx10, mmSDMA1_xxx registers are defined NOT based @@ -322,12 +221,12 @@ static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL }; - uint32_t retval; - retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL - - mmSDMA0_RLC0_RB_CNTL); + uint32_t retval = sdma_engine_reg_base[engine_id] + + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); - pr_debug("sdma base address: 0x%x\n", retval); + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); return retval; } @@ -488,72 +387,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; - uint32_t sdma_base_addr, sdmax_gfx_context_cntl; + uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; uint32_t data; uint64_t data64; uint64_t __user *wptr64 = (uint64_t __user *)wptr; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - pr_debug("sdma load base addr %x for engine %d, queue %d\n", sdma_base_addr, m->sdma_engine_id, m->sdma_queue_id); - sdmax_gfx_context_cntl = m->sdma_engine_id ? 
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) : - SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - data = RREG32(sdmax_gfx_context_cntl); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(sdmax_gfx_context_cntl, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET, m->sdmax_rlcx_doorbell_offset); data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI, m->sdmax_rlcx_rb_rptr_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); if (read_user_wptr(mm, wptr64, data64)) { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, lower_32_bits(data64)); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, upper_32_bits(data64)); } else { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, m->sdmax_rlcx_rb_rptr_hi); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdmax_rlcx_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdmax_rlcx_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdmax_rlcx_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -563,28 +457,26 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, uint32_t (**dump)[2], uint32_t *n_regs) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id); + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); uint32_t i = 0, reg; #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) - pr_debug("sdma dump engine id %d queue_id %d\n", 
engine_id, queue_id); - pr_debug("sdma base addr %x\n", sdma_base_addr); - *dump = kmalloc(HQD_N_REGS*2*sizeof(uint32_t), GFP_KERNEL); if (*dump == NULL) return -ENOMEM; for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -618,14 +510,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -746,59 +638,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v10_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); m->sdmax_rlcx_rb_rptr_hi = - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI); + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI); return 0; } -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid) +static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid) { - uint32_t reg; + uint32_t value; struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - reg = 
RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); - return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; -} - -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid) -{ - uint32_t reg; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; - reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) - + vmid); - return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid) @@ -830,6 +715,8 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; int vmid; + uint16_t queried_pasid; + bool ret; struct amdgpu_ring *ring = &adev->gfx.kiq.ring; if (amdgpu_emu_mode == 0 && ring->sched.ready) @@ -838,13 +725,13 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) for (vmid = 0; vmid < 16; vmid++) { if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) continue; - if (get_atc_vmid_pasid_mapping_valid(kgd, vmid)) { - if (get_atc_vmid_pasid_mapping_pasid(kgd, vmid) - == pasid) { - amdgpu_gmc_flush_gpu_tlb(adev, vmid, - AMDGPU_GFXHUB_0, 0); - break; - } + + ret = get_atc_vmid_pasid_mapping_info(kgd, vmid, + &queried_pasid); + if (ret && queried_pasid == pasid) { + amdgpu_gmc_flush_gpu_tlb(adev, vmid, + AMDGPU_GFXHUB_0, 0); + break; } } @@ -914,7 +801,6 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint64_t base = page_table_base | AMDGPU_PTE_VALID; if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) { pr_err("trying to set page table base for wrong VMID %u\n", @@ -922,18 +808,31 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, return; } - /* TODO: take advantage of per-process address space size. For - * now, all processes share the same address space size, like - * on GFX8 and older. 
- */ - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32) + (vmid*2), 0); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32) + (vmid*2), 0); - - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32) + (vmid*2), - lower_32_bits(adev->vm_manager.max_pfn - 1)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32) + (vmid*2), - upper_32_bits(adev->vm_manager.max_pfn - 1)); - - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32) + (vmid*2), lower_32_bits(base)); - WREG32(SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + (vmid*2), upper_32_bits(base)); + /* SDMA is on gfxhub as well for Navi1* series */ + gfxhub_v2_0_setup_vm_pt_regs(adev, vmid, page_table_base); } + +const struct kfd2kgd_calls gfx_v10_kfd2kgd = { + .program_sh_mem_settings = kgd_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, + .init_interrupts = kgd_init_interrupts, + .hqd_load = kgd_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_address_watch_disable, + .address_watch_execute = kgd_address_watch_execute, + .wave_control_execute = kgd_wave_control_execute, + .address_watch_get_offset = kgd_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_info = + get_atc_vmid_pasid_mapping_info, + .get_tile_config = amdgpu_amdkfd_get_tile_config, + .set_vm_context_page_table_base = set_vm_context_page_table_base, + .invalidate_tlbs = invalidate_tlbs, + .invalidate_tlbs_vmid = invalidate_tlbs_vmid, + .get_hive_id = amdgpu_amdkfd_get_hive_id, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index 5f459bf5f622..6e6f0a99ec06 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -20,8 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include <linux/fdtable.h> -#include <linux/uaccess.h> #include <linux/mmu_context.h> #include "amdgpu.h" @@ -86,65 +84,6 @@ union TCP_WATCH_CNTL_BITS { float f32All; }; -/* - * Register access functions - */ - -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, - uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, - uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases); - -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, - unsigned int vmid); - -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); -static int kgd_hqd_dump(struct kgd_dev *kgd, - uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, - uint32_t __user *wptr, struct mm_struct *mm); -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, - uint32_t engine_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); - -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, - enum kfd_preempt_type reset_type, - unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id); -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int utimeout); -static int kgd_address_watch_disable(struct kgd_dev *kgd); -static int kgd_address_watch_execute(struct kgd_dev *kgd, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo); -static int kgd_wave_control_execute(struct kgd_dev *kgd, - uint32_t gfx_index_val, - uint32_t sq_cmd); -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, - unsigned int watch_point_id, - unsigned int reg_offset); - -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid); -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); - -static void set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid); -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base); -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); -static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd); - /* Because of REG_GET_FIELD() being used, we put this function in the * asic specific file. 
*/ @@ -170,37 +109,6 @@ static int get_tile_config(struct kgd_dev *kgd, return 0; } -static const struct kfd2kgd_calls kfd2kgd = { - .program_sh_mem_settings = kgd_program_sh_mem_settings, - .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, - .init_interrupts = kgd_init_interrupts, - .hqd_load = kgd_hqd_load, - .hqd_sdma_load = kgd_hqd_sdma_load, - .hqd_dump = kgd_hqd_dump, - .hqd_sdma_dump = kgd_hqd_sdma_dump, - .hqd_is_occupied = kgd_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, - .hqd_destroy = kgd_hqd_destroy, - .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, - .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = get_tile_config, - .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, - .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg, -}; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -303,14 +211,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) return 0; } -static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m) +static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m) { uint32_t retval; retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET + m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET; - pr_debug("sdma base address: 0x%x\n", retval); + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", + m->sdma_engine_id, m->sdma_queue_id, retval); return retval; } @@ -413,60 +322,52 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; unsigned long end_jiffies; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t data; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - if (m->sdma_engine_id) { - data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL); - data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data); - } else { - data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data); - } data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + 
mmSDMA0_RLC0_RB_RPTR, m->sdma_rlc_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdma_rlc_rb_rptr); if (read_user_wptr(mm, wptr, data)) - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data); else - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdma_rlc_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR, m->sdma_rlc_virtual_addr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdma_rlc_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdma_rlc_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdma_rlc_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -524,13 +425,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -645,32 +546,34 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct cik_sdma_rlc_registers *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdma_rlc_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + 
mmSDMA0_RLC0_RB_RPTR); return 0; } @@ -758,24 +661,16 @@ static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset]; } -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid) +static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid) { - uint32_t reg; + uint32_t value; struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; -} - -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid) -{ - uint32_t reg; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; - reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } static void set_scratch_backing_va(struct kgd_dev *kgd, @@ -855,3 +750,28 @@ static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd) return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID); } + +const struct kfd2kgd_calls gfx_v7_kfd2kgd = { + .program_sh_mem_settings = kgd_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, + .init_interrupts = kgd_init_interrupts, + .hqd_load = kgd_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_address_watch_disable, + .address_watch_execute = kgd_address_watch_execute, + .wave_control_execute = kgd_wave_control_execute, + .address_watch_get_offset = kgd_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info, + .set_scratch_backing_va = set_scratch_backing_va, + .get_tile_config = get_tile_config, + .set_vm_context_page_table_base = set_vm_context_page_table_base, + .invalidate_tlbs = invalidate_tlbs, + .invalidate_tlbs_vmid = invalidate_tlbs_vmid, + .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 6d2f61449606..bfbddedb2380 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -20,9 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ -#include <linux/module.h> -#include <linux/fdtable.h> -#include <linux/uaccess.h> #include <linux/mmu_context.h> #include "amdgpu.h" @@ -44,62 +41,6 @@ enum hqd_dequeue_request_type { RESET_WAVES }; -/* - * Register access functions - */ - -static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid, - uint32_t sh_mem_config, - uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases); -static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, - unsigned int vmid); -static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id); -static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, - uint32_t queue_id, uint32_t __user *wptr, - uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); -static int kgd_hqd_dump(struct kgd_dev *kgd, - uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, - uint32_t __user *wptr, struct mm_struct *mm); -static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, - uint32_t engine_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); -static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id); -static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd); -static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd, - enum kfd_preempt_type reset_type, - unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id); -static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, - unsigned int utimeout); -static int kgd_address_watch_disable(struct kgd_dev *kgd); -static int kgd_address_watch_execute(struct kgd_dev *kgd, - unsigned int watch_point_id, - uint32_t cntl_val, - uint32_t addr_hi, - uint32_t addr_lo); -static int kgd_wave_control_execute(struct kgd_dev *kgd, - uint32_t gfx_index_val, - uint32_t sq_cmd); -static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd, - unsigned int watch_point_id, - unsigned int reg_offset); - -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid); -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); -static void set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid); -static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, - uint64_t page_table_base); -static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); -static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); - /* Because of REG_GET_FIELD() being used, we put this function in the * asic specific file. 
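The REG_GET_FIELD()/REG_SET_FIELD() macros expand to mask-and-shift operations built from the generated __SHIFT/__MASK register headers, which is why this helper lives in the ASIC-specific file. A short fragment (not a standalone function) illustrating the pattern used throughout these hunks for the doorbell enable:

/* Fragment from the kgd_hqd_sdma_load() pattern above. REG_SET_FIELD()
 * expands to (val & ~FIELD_MASK) | ((1 << FIELD_SHIFT) & FIELD_MASK),
 * so only the ENABLE field of the saved doorbell value is changed. */
uint32_t doorbell = m->sdmax_rlcx_doorbell;	/* value restored from the MQD */
uint32_t data;

data = REG_SET_FIELD(doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1);
WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);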
*/ @@ -125,38 +66,6 @@ static int get_tile_config(struct kgd_dev *kgd, return 0; } -static const struct kfd2kgd_calls kfd2kgd = { - .program_sh_mem_settings = kgd_program_sh_mem_settings, - .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, - .init_interrupts = kgd_init_interrupts, - .hqd_load = kgd_hqd_load, - .hqd_sdma_load = kgd_hqd_sdma_load, - .hqd_dump = kgd_hqd_dump, - .hqd_sdma_dump = kgd_hqd_sdma_dump, - .hqd_is_occupied = kgd_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, - .hqd_destroy = kgd_hqd_destroy, - .hqd_sdma_destroy = kgd_hqd_sdma_destroy, - .address_watch_disable = kgd_address_watch_disable, - .address_watch_execute = kgd_address_watch_execute, - .wave_control_execute = kgd_wave_control_execute, - .address_watch_get_offset = kgd_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = set_scratch_backing_va, - .get_tile_config = get_tile_config, - .set_vm_context_page_table_base = set_vm_context_page_table_base, - .invalidate_tlbs = invalidate_tlbs, - .invalidate_tlbs_vmid = invalidate_tlbs_vmid, -}; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} - static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd) { return (struct amdgpu_device *)kgd; @@ -260,13 +169,15 @@ static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) return 0; } -static inline uint32_t get_sdma_base_addr(struct vi_sdma_mqd *m) +static inline uint32_t get_sdma_rlc_reg_offset(struct vi_sdma_mqd *m) { uint32_t retval; retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET + m->sdma_queue_id * KFD_VI_SDMA_QUEUE_OFFSET; - pr_debug("sdma base address: 0x%x\n", retval); + + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", + m->sdma_engine_id, m->sdma_queue_id, retval); return retval; } @@ -398,59 +309,51 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; unsigned long end_jiffies; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t data; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - if (m->sdma_engine_id) { - data = RREG32(mmSDMA1_GFX_CONTEXT_CNTL); - data = REG_SET_FIELD(data, SDMA1_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(mmSDMA1_GFX_CONTEXT_CNTL, data); - } else { - data = RREG32(mmSDMA0_GFX_CONTEXT_CNTL); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(mmSDMA0_GFX_CONTEXT_CNTL, data); - } data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 
data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); if (read_user_wptr(mm, wptr, data)) - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data); else - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR, m->sdmax_rlcx_virtual_addr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdmax_rlcx_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdmax_rlcx_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdmax_rlcx_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -517,13 +420,13 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -641,54 +544,48 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct vi_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(m); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); return 0; } -static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t 
vmid) +static bool get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid) { - uint32_t reg; + uint32_t value; struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; -} + value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; -static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid) -{ - uint32_t reg; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - - reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid); - return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } static int kgd_address_watch_disable(struct kgd_dev *kgd) @@ -798,3 +695,28 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid) RREG32(mmVM_INVALIDATE_RESPONSE); return 0; } + +const struct kfd2kgd_calls gfx_v8_kfd2kgd = { + .program_sh_mem_settings = kgd_program_sh_mem_settings, + .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping, + .init_interrupts = kgd_init_interrupts, + .hqd_load = kgd_hqd_load, + .hqd_sdma_load = kgd_hqd_sdma_load, + .hqd_dump = kgd_hqd_dump, + .hqd_sdma_dump = kgd_hqd_sdma_dump, + .hqd_is_occupied = kgd_hqd_is_occupied, + .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, + .hqd_destroy = kgd_hqd_destroy, + .hqd_sdma_destroy = kgd_hqd_sdma_destroy, + .address_watch_disable = kgd_address_watch_disable, + .address_watch_execute = kgd_address_watch_execute, + .wave_control_execute = kgd_wave_control_execute, + .address_watch_get_offset = kgd_address_watch_get_offset, + .get_atc_vmid_pasid_mapping_info = + get_atc_vmid_pasid_mapping_info, + .set_scratch_backing_va = set_scratch_backing_va, + .get_tile_config = get_tile_config, + .set_vm_context_page_table_base = set_vm_context_page_table_base, + .invalidate_tlbs = invalidate_tlbs, + .invalidate_tlbs_vmid = invalidate_tlbs_vmid, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index e262f2ac07a3..47c853ef1051 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -19,17 +19,10 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
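The two ATC queries (mapping valid, mapping PASID) are folded into a single get_atc_vmid_pasid_mapping_info() call for gfx v7/v8/v9: one register read returns the VALID bit as the function result and the PASID through an out parameter. A minimal caller sketch (wrapper name is illustrative):

/* Illustrative caller pattern for the consolidated query. */
static bool vmid_maps_pasid(struct kgd_dev *kgd, uint8_t vmid, uint16_t pasid)
{
	uint16_t queried;

	return get_atc_vmid_pasid_mapping_info(kgd, vmid, &queried) &&
	       queried == pasid;
}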
*/ - -#define pr_fmt(fmt) "kfd2kgd: " fmt - -#include <linux/module.h> -#include <linux/fdtable.h> -#include <linux/uaccess.h> #include <linux/mmu_context.h> #include "amdgpu.h" #include "amdgpu_amdkfd.h" -#include "soc15_hw_ip.h" #include "gc/gc_9_0_offset.h" #include "gc/gc_9_0_sh_mask.h" #include "vega10_enum.h" @@ -50,9 +43,6 @@ #include "gmc_v9_0.h" -#define V9_PIPE_PER_MEC (4) -#define V9_QUEUES_PER_PIPE_MEC (8) - enum hqd_dequeue_request_type { NO_ACTION = 0, DRAIN_PIPE, @@ -226,22 +216,21 @@ int kgd_gfx_v9_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id) return 0; } -static uint32_t get_sdma_base_addr(struct amdgpu_device *adev, +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, unsigned int engine_id, unsigned int queue_id) { - uint32_t base[2] = { + uint32_t sdma_engine_reg_base[2] = { SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL, SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_RLC0_RB_CNTL) - mmSDMA1_RLC0_RB_CNTL }; - uint32_t retval; + uint32_t retval = sdma_engine_reg_base[engine_id] + + queue_id * (mmSDMA0_RLC1_RB_CNTL - mmSDMA0_RLC0_RB_CNTL); - retval = base[engine_id] + queue_id * (mmSDMA0_RLC1_RB_CNTL - - mmSDMA0_RLC0_RB_CNTL); - - pr_debug("sdma base address: 0x%x\n", retval); + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); return retval; } @@ -388,71 +377,67 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr, sdmax_gfx_context_cntl; + uint32_t sdma_rlc_reg_offset; unsigned long end_jiffies; uint32_t data; uint64_t data64; uint64_t __user *wptr64 = (uint64_t __user *)wptr; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdmax_gfx_context_cntl = m->sdma_engine_id ? 
- SOC15_REG_OFFSET(SDMA1, 0, mmSDMA1_GFX_CONTEXT_CNTL) : - SOC15_REG_OFFSET(SDMA0, 0, mmSDMA0_GFX_CONTEXT_CNTL); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, m->sdmax_rlcx_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)); end_jiffies = msecs_to_jiffies(2000) + jiffies; while (true) { - data = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - data = RREG32(sdmax_gfx_context_cntl); - data = REG_SET_FIELD(data, SDMA0_GFX_CONTEXT_CNTL, - RESUME_CTX, 0); - WREG32(sdmax_gfx_context_cntl, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL_OFFSET, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL_OFFSET, m->sdmax_rlcx_doorbell_offset); data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA0_RLC0_DOORBELL, ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, data); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI, m->sdmax_rlcx_rb_rptr_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 1); if (read_user_wptr(mm, wptr64, data64)) { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, lower_32_bits(data64)); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, upper_32_bits(data64)); } else { - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, m->sdmax_rlcx_rb_rptr); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR_HI, m->sdmax_rlcx_rb_rptr_hi); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_MINOR_PTR_UPDATE, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI, m->sdmax_rlcx_rb_base_hi); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO, m->sdmax_rlcx_rb_rptr_addr_lo); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI, m->sdmax_rlcx_rb_rptr_addr_hi); data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA0_RLC0_RB_CNTL, RB_ENABLE, 1); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, data); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data); return 0; } @@ -462,7 +447,8 @@ static int kgd_hqd_sdma_dump(struct kgd_dev *kgd, uint32_t (**dump)[2], uint32_t *n_regs) { struct amdgpu_device *adev = get_amdgpu_device(kgd); - uint32_t sdma_base_addr = get_sdma_base_addr(adev, engine_id, queue_id); + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); uint32_t i = 0, reg; #undef HQD_N_REGS #define HQD_N_REGS (19+6+7+10) @@ -472,15 +458,15 @@ static int kgd_hqd_sdma_dump(struct 
kgd_dev *kgd, return -ENOMEM; for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_STATUS; reg <= mmSDMA0_RLC0_CSA_ADDR_HI; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_IB_SUB_REMAIN; reg <= mmSDMA0_RLC0_MINOR_PTR_UPDATE; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); for (reg = mmSDMA0_RLC0_MIDCMD_DATA0; reg <= mmSDMA0_RLC0_MIDCMD_CNTL; reg++) - DUMP_REG(sdma_base_addr + reg); + DUMP_REG(sdma_rlc_reg_offset + reg); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -514,14 +500,14 @@ static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t sdma_rlc_rb_cntl; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK) return true; @@ -584,59 +570,52 @@ static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd, { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct v9_sdma_mqd *m; - uint32_t sdma_base_addr; + uint32_t sdma_rlc_reg_offset; uint32_t temp; unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; m = get_sdma_mqd(mqd); - sdma_base_addr = get_sdma_base_addr(adev, m->sdma_engine_id, + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, m->sdma_queue_id); - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL); temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK; - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp); while (true) { - temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS); + temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS); if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK) break; - if (time_after(jiffies, end_jiffies)) + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); return -ETIME; + } usleep_range(500, 1000); } - WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0); - WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL) | + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) | SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK); - m->sdmax_rlcx_rb_rptr = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR); m->sdmax_rlcx_rb_rptr_hi = - RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_HI); + RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_HI); return 0; } -bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid) +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid) { - uint32_t reg; + uint32_t value; struct amdgpu_device *adev = (struct amdgpu_device *) kgd; - reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + value = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) + vmid); - return 
reg & ATC_VMID0_PASID_MAPPING__VALID_MASK; -} - -uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid) -{ - uint32_t reg; - struct amdgpu_device *adev = (struct amdgpu_device *) kgd; + *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK; - reg = RREG32(SOC15_REG_OFFSET(ATHUB, 0, mmATC_VMID0_PASID_MAPPING) - + vmid); - return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK; + return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK); } static int invalidate_tlbs_with_kiq(struct amdgpu_device *adev, uint16_t pasid, @@ -671,6 +650,8 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) { struct amdgpu_device *adev = (struct amdgpu_device *) kgd; int vmid, i; + uint16_t queried_pasid; + bool ret; struct amdgpu_ring *ring = &adev->gfx.kiq.ring; uint32_t flush_type = 0; @@ -686,14 +667,14 @@ int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid) for (vmid = 0; vmid < 16; vmid++) { if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) continue; - if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(kgd, vmid)) { - if (kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(kgd, vmid) - == pasid) { - for (i = 0; i < adev->num_vmhubs; i++) - amdgpu_gmc_flush_gpu_tlb(adev, vmid, - i, flush_type); - break; - } + + ret = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(kgd, vmid, + &queried_pasid); + if (ret && queried_pasid == pasid) { + for (i = 0; i < adev->num_vmhubs; i++) + amdgpu_gmc_flush_gpu_tlb(adev, vmid, + i, flush_type); + break; } } @@ -777,15 +758,6 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, return 0; } -void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid) -{ - /* No longer needed on GFXv9. The scratch base address is - * passed to the shader by the CP. It's the user mode driver's - * responsibility. 
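The SDMA load/destroy paths in these hunks all use the same bounded poll: clear RB_ENABLE, then wait for the RLC context to report idle (now with a pr_err on timeout) before touching the remaining queue registers. A simplified standalone sketch of that loop, with the helper name assumed:

/* Poll SDMA0_RLC0_CONTEXT_STATUS until IDLE or the timeout expires. */
static int wait_sdma_rlc_idle(struct amdgpu_device *adev,
			      uint32_t sdma_rlc_reg_offset,
			      unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
	uint32_t status;

	while (true) {
		status = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
		if (status & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			return 0;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("SDMA RLC not idle in %s\n", __func__);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}
}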
- */ -} - void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base) { @@ -811,7 +783,7 @@ void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmi gfxhub_v1_0_setup_vm_pt_regs(adev, vmid, page_table_base); } -static const struct kfd2kgd_calls kfd2kgd = { +const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .program_sh_mem_settings = kgd_gfx_v9_program_sh_mem_settings, .set_pasid_vmid_mapping = kgd_gfx_v9_set_pasid_vmid_mapping, .init_interrupts = kgd_gfx_v9_init_interrupts, @@ -827,19 +799,11 @@ static const struct kfd2kgd_calls kfd2kgd = { .address_watch_execute = kgd_gfx_v9_address_watch_execute, .wave_control_execute = kgd_gfx_v9_wave_control_execute, .address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset, - .get_atc_vmid_pasid_mapping_pasid = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid, - .get_atc_vmid_pasid_mapping_valid = - kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid, - .set_scratch_backing_va = kgd_gfx_v9_set_scratch_backing_va, + .get_atc_vmid_pasid_mapping_info = + kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, .get_tile_config = kgd_gfx_v9_get_tile_config, .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .invalidate_tlbs = kgd_gfx_v9_invalidate_tlbs, .invalidate_tlbs_vmid = kgd_gfx_v9_invalidate_tlbs_vmid, .get_hive_id = amdgpu_amdkfd_get_hive_id, }; - -struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void) -{ - return (struct kfd2kgd_calls *)&kfd2kgd; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 26d8879bff9d..d9e9ad22b2bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -55,14 +55,10 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd, unsigned int watch_point_id, unsigned int reg_offset); -bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, - uint8_t vmid); -uint16_t kgd_gfx_v9_get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd, - uint8_t vmid); +bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd, + uint8_t vmid, uint16_t *p_pasid); void kgd_gfx_v9_set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid, uint64_t page_table_base); -void kgd_gfx_v9_set_scratch_backing_va(struct kgd_dev *kgd, - uint64_t va, uint32_t vmid); int kgd_gfx_v9_invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid); int kgd_gfx_v9_invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid); int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 6d021ecc8d59..ae6f5446262c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -19,9 +19,6 @@ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ - -#define pr_fmt(fmt) "kfd2kgd: " fmt - #include <linux/dma-buf.h> #include <linux/list.h> #include <linux/pagemap.h> @@ -33,11 +30,6 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_dma_buf.h" -/* Special VM and GART address alignment needed for VI pre-Fiji due to - * a HW bug. 
- */ -#define VI_BO_SIZE_ALIGN (0x8000) - /* BO flag to indicate a KFD userptr BO */ #define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63) @@ -349,13 +341,46 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync) struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev); int ret; - ret = amdgpu_vm_update_directories(adev, vm); + ret = amdgpu_vm_update_pdes(adev, vm, false); if (ret) return ret; return amdgpu_sync_fence(NULL, sync, vm->last_update, false); } +static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem) +{ + struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev); + bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT; + uint32_t mapping_flags; + + mapping_flags = AMDGPU_VM_PAGE_READABLE; + if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE) + mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; + if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE) + mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; + + switch (adev->asic_type) { + case CHIP_ARCTURUS: + if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) { + if (bo_adev == adev) + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW; + else + mapping_flags |= AMDGPU_VM_MTYPE_UC; + } else { + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; + } + break; + default: + mapping_flags |= coherent ? + AMDGPU_VM_MTYPE_UC : AMDGPU_VM_MTYPE_NC; + } + + return amdgpu_gem_va_map_flags(adev, mapping_flags); +} + /* add_bo_to_vm - Add a BO to a VM * * Everything that needs to bo done only once when a BO is first added @@ -404,8 +429,7 @@ static int add_bo_to_vm(struct amdgpu_device *adev, struct kgd_mem *mem, } bo_va_entry->va = va; - bo_va_entry->pte_flags = amdgpu_gmc_get_pte_flags(adev, - mem->mapping_flags); + bo_va_entry->pte_flags = get_pte_flags(adev, mem); bo_va_entry->kgd_dev = (void *)adev; list_add(&bo_va_entry->bo_list, list_bo_va); @@ -586,7 +610,7 @@ static int reserve_bo_and_vm(struct kgd_mem *mem, amdgpu_vm_get_pd_bo(vm, &ctx->list, &ctx->vm_pd[0]); ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, - false, &ctx->duplicates, true); + false, &ctx->duplicates); if (!ret) ctx->reserved = true; else { @@ -659,7 +683,7 @@ static int reserve_bo_and_cond_vms(struct kgd_mem *mem, } ret = ttm_eu_reserve_buffers(&ctx->ticket, &ctx->list, - false, &ctx->duplicates, true); + false, &ctx->duplicates); if (!ret) ctx->reserved = true; else @@ -1079,10 +1103,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( uint64_t user_addr = 0; struct amdgpu_bo *bo; struct amdgpu_bo_param bp; - int byte_align; u32 domain, alloc_domain; u64 alloc_flags; - uint32_t mapping_flags; int ret; /* @@ -1135,25 +1157,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( if ((*mem)->aql_queue) size = size >> 1; - /* Workaround for TLB bug on older VI chips */ - byte_align = (adev->family == AMDGPU_FAMILY_VI && - adev->asic_type != CHIP_FIJI && - adev->asic_type != CHIP_POLARIS10 && - adev->asic_type != CHIP_POLARIS11 && - adev->asic_type != CHIP_POLARIS12 && - adev->asic_type != CHIP_VEGAM) ? 
- VI_BO_SIZE_ALIGN : 1; - - mapping_flags = AMDGPU_VM_PAGE_READABLE; - if (flags & ALLOC_MEM_FLAGS_WRITABLE) - mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE; - if (flags & ALLOC_MEM_FLAGS_EXECUTABLE) - mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE; - if (flags & ALLOC_MEM_FLAGS_COHERENT) - mapping_flags |= AMDGPU_VM_MTYPE_UC; - else - mapping_flags |= AMDGPU_VM_MTYPE_NC; - (*mem)->mapping_flags = mapping_flags; + (*mem)->alloc_flags = flags; amdgpu_sync_create(&(*mem)->sync); @@ -1168,7 +1172,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( memset(&bp, 0, sizeof(bp)); bp.size = size; - bp.byte_align = byte_align; + bp.byte_align = 1; bp.domain = alloc_domain; bp.flags = alloc_flags; bp.type = bo_type; @@ -1626,9 +1630,10 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd, INIT_LIST_HEAD(&(*mem)->bo_va_list); mutex_init(&(*mem)->lock); - (*mem)->mapping_flags = - AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE | - AMDGPU_VM_PAGE_EXECUTABLE | AMDGPU_VM_MTYPE_NC; + (*mem)->alloc_flags = + ((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ? + ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) | + ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE; (*mem)->bo = amdgpu_bo_ref(bo); (*mem)->va = va; @@ -1797,8 +1802,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) } /* Reserve all BOs and page tables for validation */ - ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates, - true); + ret = ttm_eu_reserve_buffers(&ticket, &resv_list, false, &duplicates); WARN(!list_empty(&duplicates), "Duplicates should be empty"); if (ret) goto out_free; @@ -1996,7 +2000,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) } ret = ttm_eu_reserve_buffers(&ctx.ticket, &ctx.list, - false, &duplicate_save, true); + false, &duplicate_save); if (ret) { pr_debug("Memory eviction: TTM Reserve Failed. Try again\n"); goto ttm_reserve_fail; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 1c9d40f97a9b..72232fccf61a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -2038,6 +2038,11 @@ int amdgpu_atombios_init(struct amdgpu_device *adev) if (adev->is_atom_fw) { amdgpu_atomfirmware_scratch_regs_init(adev); amdgpu_atomfirmware_allocate_fb_scratch(adev); + ret = amdgpu_atomfirmware_get_mem_train_fb_loc(adev); + if (ret) { + DRM_ERROR("Failed to get mem train fb location.\n"); + return ret; + } } else { amdgpu_atombios_scratch_regs_init(adev); amdgpu_atombios_allocate_fb_scratch(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index daf687428cdb..ff4eb96bdfb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -27,6 +27,7 @@ #include "amdgpu_atomfirmware.h" #include "atom.h" #include "atombios.h" +#include "soc15_hw_ip.h" bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev) { @@ -120,65 +121,14 @@ union vram_info { struct atom_vram_info_header_v2_3 v23; struct atom_vram_info_header_v2_4 v24; }; -/* - * Return vram width from integrated system info table, if available, - * or 0 if not. 
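The new get_pte_flags() helper in the amdgpu_amdkfd_gpuvm.c hunk above derives VM mapping flags from the KFD allocation flags at map time instead of storing them at allocation time. A worked example of what it produces, under assumed inputs chosen only for illustration:

/* Example (assumed inputs):
 *   adev->asic_type == CHIP_ARCTURUS, bo_adev == adev (BO in local VRAM),
 *   alloc_flags = ALLOC_MEM_FLAGS_VRAM | ALLOC_MEM_FLAGS_WRITABLE |
 *                 ALLOC_MEM_FLAGS_COHERENT
 *
 * get_pte_flags() builds:
 *   mapping_flags = AMDGPU_VM_PAGE_READABLE | AMDGPU_VM_PAGE_WRITEABLE |
 *                   AMDGPU_VM_MTYPE_CC
 * and returns amdgpu_gem_va_map_flags(adev, mapping_flags), i.e. the PTE
 * bits for a cache-coherent local-VRAM mapping. VRAM owned by a different
 * GPU (bo_adev != adev) would get MTYPE_UC instead, and GTT allocations
 * get UC (coherent) or NC (non-coherent).
 */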
- */ -int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - int index; - u16 data_offset, size; - union igp_info *igp_info; - union vram_info *vram_info; - u32 mem_channel_number; - u32 mem_channel_width; - u8 frev, crev; - - if (adev->flags & AMD_IS_APU) - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - integratedsysteminfo); - else - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - vram_info); - /* get any igp specific overrides */ - if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, - &frev, &crev, &data_offset)) { - if (adev->flags & AMD_IS_APU) { - igp_info = (union igp_info *) - (mode_info->atom_context->bios + data_offset); - switch (crev) { - case 11: - mem_channel_number = igp_info->v11.umachannelnumber; - /* channel width is 64 */ - return mem_channel_number * 64; - default: - return 0; - } - } else { - vram_info = (union vram_info *) - (mode_info->atom_context->bios + data_offset); - switch (crev) { - case 3: - mem_channel_number = vram_info->v23.vram_module[0].channel_num; - mem_channel_width = vram_info->v23.vram_module[0].channel_width; - return mem_channel_number * (1 << mem_channel_width); - case 4: - mem_channel_number = vram_info->v24.vram_module[0].channel_num; - mem_channel_width = vram_info->v24.vram_module[0].channel_width; - return mem_channel_number * (1 << mem_channel_width); - default: - return 0; - } - } - } - - return 0; -} +union vram_module { + struct atom_vram_module_v9 v9; + struct atom_vram_module_v10 v10; +}; -static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev, - int atom_mem_type) +static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev, + int atom_mem_type) { int vram_type; @@ -219,19 +169,25 @@ static int convert_atom_mem_type_to_vram_type (struct amdgpu_device *adev, return vram_type; } -/* - * Return vram type from either integrated system info table - * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not - */ -int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev) + + +int +amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, + int *vram_width, int *vram_type, + int *vram_vendor) { struct amdgpu_mode_info *mode_info = &adev->mode_info; - int index; + int index, i = 0; u16 data_offset, size; union igp_info *igp_info; union vram_info *vram_info; + union vram_module *vram_module; u8 frev, crev; u8 mem_type; + u8 mem_vendor; + u32 mem_channel_number; + u32 mem_channel_width; + u32 module_id; if (adev->flags & AMD_IS_APU) index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, @@ -239,6 +195,7 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev) else index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_info); + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size, &frev, &crev, &data_offset)) { @@ -247,25 +204,67 @@ int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev) (mode_info->atom_context->bios + data_offset); switch (crev) { case 11: + mem_channel_number = igp_info->v11.umachannelnumber; + /* channel width is 64 */ + if (vram_width) + *vram_width = mem_channel_number * 64; mem_type = igp_info->v11.memorytype; - return convert_atom_mem_type_to_vram_type(adev, mem_type); + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + break; default: - return 0; + return -EINVAL; } } else { vram_info = (union vram_info *) 
(mode_info->atom_context->bios + data_offset); + module_id = (RREG32(adev->bios_scratch_reg_offset + 4) & 0x00ff0000) >> 16; switch (crev) { case 3: - mem_type = vram_info->v23.vram_module[0].memory_type; - return convert_atom_mem_type_to_vram_type(adev, mem_type); + if (module_id > vram_info->v23.vram_module_num) + module_id = 0; + vram_module = (union vram_module *)vram_info->v23.vram_module; + while (i < module_id) { + vram_module = (union vram_module *) + ((u8 *)vram_module + vram_module->v9.vram_module_size); + i++; + } + mem_type = vram_module->v9.memory_type; + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + mem_channel_number = vram_module->v9.channel_num; + mem_channel_width = vram_module->v9.channel_width; + if (vram_width) + *vram_width = mem_channel_number * (1 << mem_channel_width); + mem_vendor = (vram_module->v9.vender_rev_id) & 0xF; + if (vram_vendor) + *vram_vendor = mem_vendor; + break; case 4: - mem_type = vram_info->v24.vram_module[0].memory_type; - return convert_atom_mem_type_to_vram_type(adev, mem_type); + if (module_id > vram_info->v24.vram_module_num) + module_id = 0; + vram_module = (union vram_module *)vram_info->v24.vram_module; + while (i < module_id) { + vram_module = (union vram_module *) + ((u8 *)vram_module + vram_module->v10.vram_module_size); + i++; + } + mem_type = vram_module->v10.memory_type; + if (vram_type) + *vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type); + mem_channel_number = vram_module->v10.channel_num; + mem_channel_width = vram_module->v10.channel_width; + if (vram_width) + *vram_width = mem_channel_number * (1 << mem_channel_width); + mem_vendor = (vram_module->v10.vender_rev_id) & 0xF; + if (vram_vendor) + *vram_vendor = mem_vendor; + break; default: - return 0; + return -EINVAL; } } + } return 0; @@ -464,3 +463,138 @@ int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev) } return -EINVAL; } + +/* + * Check if VBIOS supports GDDR6 training data save/restore + */ +static bool gddr6_mem_train_vbios_support(struct amdgpu_device *adev) +{ + uint16_t data_offset; + int index; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + firmwareinfo); + if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL, + NULL, NULL, &data_offset)) { + struct atom_firmware_info_v3_1 *firmware_info = + (struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios + + data_offset); + + DRM_DEBUG("atom firmware capability:0x%08x.\n", + le32_to_cpu(firmware_info->firmware_capability)); + + if (le32_to_cpu(firmware_info->firmware_capability) & + ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING) + return true; + } + + return false; +} + +static int gddr6_mem_train_support(struct amdgpu_device *adev) +{ + int ret; + uint32_t major, minor, revision, hw_v; + + if (gddr6_mem_train_vbios_support(adev)) { + amdgpu_discovery_get_ip_version(adev, MP0_HWID, &major, &minor, &revision); + hw_v = HW_REV(major, minor, revision); + /* + * treat 0 revision as a special case since register for MP0 and MMHUB is missing + * for some Navi10 A0, preventing driver from discovering the hwip information since + * none of the functions will be initialized, it should not cause any problems + */ + switch (hw_v) { + case HW_REV(11, 0, 0): + case HW_REV(11, 0, 5): + ret = 1; + break; + default: + DRM_ERROR("memory training vbios supports but psp hw(%08x)" + " doesn't support!\n", hw_v); + ret = -1; + break; + } + } else { + ret = 0; + hw_v = -1; + } + + + DRM_DEBUG("mp0 hw_v %08x, 
ret:%d.\n", hw_v, ret); + return ret; +} + +int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev) +{ + struct atom_context *ctx = adev->mode_info.atom_context; + unsigned char *bios = ctx->bios; + struct vram_reserve_block *reserved_block; + int index, block_number; + uint8_t frev, crev; + uint16_t data_offset, size; + uint32_t start_address_in_kb; + uint64_t offset; + int ret; + + adev->fw_vram_usage.mem_train_support = false; + + if (adev->asic_type != CHIP_NAVI10 && + adev->asic_type != CHIP_NAVI14) + return 0; + + if (amdgpu_sriov_vf(adev)) + return 0; + + ret = gddr6_mem_train_support(adev); + if (ret == -1) + return -EINVAL; + else if (ret == 0) + return 0; + + index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, + vram_usagebyfirmware); + ret = amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, + &data_offset); + if (ret == 0) { + DRM_ERROR("parse data header failed.\n"); + return -EINVAL; + } + + DRM_DEBUG("atom firmware common table header size:0x%04x, frev:0x%02x," + " crev:0x%02x, data_offset:0x%04x.\n", size, frev, crev, data_offset); + /* only support 2.1+ */ + if (((uint16_t)frev << 8 | crev) < 0x0201) { + DRM_ERROR("frev:0x%02x, crev:0x%02x < 2.1 !\n", frev, crev); + return -EINVAL; + } + + reserved_block = (struct vram_reserve_block *) + (bios + data_offset + sizeof(struct atom_common_table_header)); + block_number = ((unsigned int)size - sizeof(struct atom_common_table_header)) + / sizeof(struct vram_reserve_block); + reserved_block += (block_number > 0) ? block_number-1 : 0; + DRM_DEBUG("block_number:0x%04x, last block: 0x%08xkb sz, %dkb fw, %dkb drv.\n", + block_number, + le32_to_cpu(reserved_block->start_address_in_kb), + le16_to_cpu(reserved_block->used_by_firmware_in_kb), + le16_to_cpu(reserved_block->used_by_driver_in_kb)); + if (reserved_block->used_by_firmware_in_kb > 0) { + start_address_in_kb = le32_to_cpu(reserved_block->start_address_in_kb); + offset = (uint64_t)start_address_in_kb * ONE_KiB; + if ((offset & (ONE_MiB - 1)) < (4 * ONE_KiB + 1) ) { + offset -= ONE_MiB; + } + + offset &= ~(ONE_MiB - 1); + adev->fw_vram_usage.mem_train_fb_loc = offset; + adev->fw_vram_usage.mem_train_support = true; + DRM_DEBUG("mem_train_fb_loc:0x%09llx.\n", offset); + ret = 0; + } else { + DRM_ERROR("used_by_firmware_in_kb is 0!\n"); + ret = -EINVAL; + } + + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h index 5ec6f92f353c..f871af5ea6f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.h @@ -29,8 +29,9 @@ bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev); void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev); int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev); -int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev); -int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev); +int amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev, + int *vram_width, int *vram_type, int *vram_vendor); +int amdgpu_atomfirmware_get_mem_train_fb_loc(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev); int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev); bool amdgpu_atomfirmware_mem_ecc_supported(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c index 
3e35a8f2c5e5..a97fb759e2f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c @@ -613,17 +613,7 @@ static bool amdgpu_atpx_detect(void) bool d3_supported = false; struct pci_dev *parent_pdev; - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { - vga_count++; - - has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); - - parent_pdev = pci_upstream_bridge(pdev); - d3_supported |= parent_pdev && parent_pdev->bridge_d3; - amdgpu_atpx_get_quirks(pdev); - } - - while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_OTHER << 8, pdev)) != NULL) { + while ((pdev = pci_get_class(PCI_BASE_CLASS_DISPLAY << 16, pdev)) != NULL) { vga_count++; has_atpx |= (amdgpu_atpx_pci_probe_handle(pdev) == true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c index 649e68c4479b..d1495e1c9289 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_benchmark.c @@ -33,7 +33,7 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, { unsigned long start_jiffies; unsigned long end_jiffies; - struct dma_fence *fence = NULL; + struct dma_fence *fence; int i, r; start_jiffies = jiffies; @@ -44,16 +44,14 @@ static int amdgpu_benchmark_do_move(struct amdgpu_device *adev, unsigned size, if (r) goto exit_do_move; r = dma_fence_wait(fence, false); + dma_fence_put(fence); if (r) goto exit_do_move; - dma_fence_put(fence); } end_jiffies = jiffies; r = jiffies_to_msecs(end_jiffies - start_jiffies); exit_do_move: - if (fence) - dma_fence_put(fence); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c index ece55c8fa673..a62cbc8199de 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c @@ -217,11 +217,10 @@ amdgpu_connector_update_scratch_regs(struct drm_connector *connector, struct drm_encoder *encoder; const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; bool connected; - int i; best_encoder = connector_funcs->best_encoder(connector); - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { if ((encoder == best_encoder) && (status == connector_status_connected)) connected = true; else @@ -236,9 +235,8 @@ amdgpu_connector_find_encoder(struct drm_connector *connector, int encoder_type) { struct drm_encoder *encoder; - int i; - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { if (encoder->encoder_type == encoder_type) return encoder; } @@ -347,10 +345,9 @@ static struct drm_encoder * amdgpu_connector_best_single_encoder(struct drm_connector *connector) { struct drm_encoder *encoder; - int i; /* pick the first one */ - drm_connector_for_each_possible_encoder(connector, encoder, i) + drm_connector_for_each_possible_encoder(connector, encoder) return encoder; return NULL; @@ -1022,8 +1019,12 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) */ if (amdgpu_connector->shared_ddc && (ret == connector_status_connected)) { struct drm_connector *list_connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *list_amdgpu_connector; - list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) { + + drm_connector_list_iter_begin(dev, &iter); + 
drm_for_each_connector_iter(list_connector, + &iter) { if (connector == list_connector) continue; list_amdgpu_connector = to_amdgpu_connector(list_connector); @@ -1040,6 +1041,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) } } } + drm_connector_list_iter_end(&iter); } } } @@ -1065,9 +1067,8 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force) /* find analog encoder */ if (amdgpu_connector->dac_load_detect) { struct drm_encoder *encoder; - int i; - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) continue; @@ -1117,9 +1118,8 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); struct drm_encoder *encoder; - int i; - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { if (amdgpu_connector->use_digital == true) { if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) return encoder; @@ -1134,7 +1134,7 @@ amdgpu_connector_dvi_encoder(struct drm_connector *connector) /* then check use digitial */ /* pick the first one */ - drm_connector_for_each_possible_encoder(connector, encoder, i) + drm_connector_for_each_possible_encoder(connector, encoder) return encoder; return NULL; @@ -1271,9 +1271,8 @@ u16 amdgpu_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn { struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; - int i; - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { amdgpu_encoder = to_amdgpu_encoder(encoder); switch (amdgpu_encoder->encoder_id) { @@ -1292,10 +1291,9 @@ static bool amdgpu_connector_encoder_is_hbr2(struct drm_connector *connector) { struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; - int i; bool found = false; - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { amdgpu_encoder = to_amdgpu_encoder(encoder); if (amdgpu_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2) found = true; @@ -1501,6 +1499,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; struct amdgpu_connector_atom_dig *amdgpu_dig_connector; struct drm_encoder *encoder; @@ -1515,10 +1514,12 @@ amdgpu_connector_add(struct amdgpu_device *adev, return; /* see if we already added it */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->connector_id == connector_id) { amdgpu_connector->devices |= supported_device; + drm_connector_list_iter_end(&iter); return; } if (amdgpu_connector->ddc_bus && i2c_bus->valid) { @@ -1533,6 +1534,7 @@ amdgpu_connector_add(struct amdgpu_device *adev, } } } + drm_connector_list_iter_end(&iter); /* check if it's a dp bridge */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 82823d9a8ba8..a169ff16277f 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -35,6 +35,7 @@ #include "amdgpu_trace.h" #include "amdgpu_gmc.h" #include "amdgpu_gem.h" +#include "amdgpu_ras.h" static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p, struct drm_amdgpu_cs_chunk_fence *data, @@ -449,75 +450,12 @@ retry: return r; } -/* Last resort, try to evict something from the current working set */ -static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p, - struct amdgpu_bo *validated) -{ - uint32_t domain = validated->allowed_domains; - struct ttm_operation_ctx ctx = { true, false }; - int r; - - if (!p->evictable) - return false; - - for (;&p->evictable->tv.head != &p->validated; - p->evictable = list_prev_entry(p->evictable, tv.head)) { - - struct amdgpu_bo_list_entry *candidate = p->evictable; - struct amdgpu_bo *bo = ttm_to_amdgpu_bo(candidate->tv.bo); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - bool update_bytes_moved_vis; - uint32_t other; - - /* If we reached our current BO we can forget it */ - if (bo == validated) - break; - - /* We can't move pinned BOs here */ - if (bo->pin_count) - continue; - - other = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type); - - /* Check if this BO is in one of the domains we need space for */ - if (!(other & domain)) - continue; - - /* Check if we can move this BO somewhere else */ - other = bo->allowed_domains & ~domain; - if (!other) - continue; - - /* Good we can try to move this BO somewhere else */ - update_bytes_moved_vis = - !amdgpu_gmc_vram_full_visible(&adev->gmc) && - amdgpu_bo_in_cpu_visible_vram(bo); - amdgpu_bo_placement_from_domain(bo, other); - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - p->bytes_moved += ctx.bytes_moved; - if (update_bytes_moved_vis) - p->bytes_moved_vis += ctx.bytes_moved; - - if (unlikely(r)) - break; - - p->evictable = list_prev_entry(p->evictable, tv.head); - list_move(&candidate->tv.head, &p->validated); - - return true; - } - - return false; -} - static int amdgpu_cs_validate(void *param, struct amdgpu_bo *bo) { struct amdgpu_cs_parser *p = param; int r; - do { - r = amdgpu_cs_bo_validate(p, bo); - } while (r == -ENOMEM && amdgpu_cs_try_evict(p, bo)); + r = amdgpu_cs_bo_validate(p, bo); if (r) return r; @@ -554,9 +492,6 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p, lobj->user_pages); } - if (p->evictable == lobj) - p->evictable = NULL; - r = amdgpu_cs_validate(p, bo); if (r) return r; @@ -646,7 +581,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, } r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true, - &duplicates, false); + &duplicates); if (unlikely(r != 0)) { if (r != -ERESTARTSYS) DRM_ERROR("ttm_eu_reserve_buffers failed.\n"); @@ -657,9 +592,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, &p->bytes_moved_vis_threshold); p->bytes_moved = 0; p->bytes_moved_vis = 0; - p->evictable = list_last_entry(&p->validated, - struct amdgpu_bo_list_entry, - tv.head); r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm, amdgpu_cs_validate, p); @@ -911,7 +843,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_vm_update_directories(adev, vm); + r = amdgpu_vm_update_pdes(adev, vm, false); if (r) return r; @@ -1355,6 +1287,9 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) bool reserved_buffers = false; int i, r; + if (amdgpu_ras_intr_triggered()) + return -EHWPOISON; + if (!adev->accel_working) return -EBUSY; diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c index 35a8d3c96fc9..08047bc4d588 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_csa.c @@ -80,7 +80,7 @@ int amdgpu_map_static_csa(struct amdgpu_device *adev, struct amdgpu_vm *vm, list_add(&csa_tv.head, &list); amdgpu_vm_get_pd_bo(vm, &list, &pd); - r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, false); + r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); if (r) { DRM_ERROR("failed to reserve CSA,PD BOs: err=%d\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 5652cc72ed3a..8e6726e0d035 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -859,6 +859,9 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) struct amdgpu_device *adev = dev->dev_private; int r = 0, i; + /* Avoid accidently unparking the sched thread during GPU reset */ + mutex_lock(&adev->lock_reset); + /* hold on the scheduler */ for (i = 0; i < AMDGPU_MAX_RINGS; i++) { struct amdgpu_ring *ring = adev->rings[i]; @@ -884,6 +887,8 @@ static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data) kthread_unpark(ring->sched.thread); } + mutex_unlock(&adev->lock_reset); + return 0; } @@ -1036,6 +1041,9 @@ static int amdgpu_debugfs_ib_preempt(void *data, u64 val) if (!fences) return -ENOMEM; + /* Avoid accidently unparking the sched thread during GPU reset */ + mutex_lock(&adev->lock_reset); + /* stop the scheduler */ kthread_park(ring->sched.thread); @@ -1075,10 +1083,11 @@ failure: /* restart the scheduler */ kthread_unpark(ring->sched.thread); + mutex_unlock(&adev->lock_reset); + ttm_bo_unlock_delayed_workqueue(&adev->mman.bdev, resched); - if (fences) - kfree(fences); + kfree(fences); return 0; } @@ -1090,8 +1099,8 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) { adev->debugfs_preempt = debugfs_create_file("amdgpu_preempt_ib", 0600, - adev->ddev->primary->debugfs_root, - (void *)adev, &fops_ib_preempt); + adev->ddev->primary->debugfs_root, adev, + &fops_ib_preempt); if (!(adev->debugfs_preempt)) { DRM_ERROR("unable to create amdgpu_preempt_ib debugsfs file\n"); return -EIO; @@ -1103,8 +1112,7 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { - if (adev->debugfs_preempt) - debugfs_remove(adev->debugfs_preempt); + debugfs_remove(adev->debugfs_preempt); } #else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7a6c837c0a85..4f76beafb2fa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -65,6 +65,8 @@ #include "amdgpu_ras.h" #include "amdgpu_pmu.h" +#include <linux/suspend.h> + MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/raven_gpu_info.bin"); @@ -78,7 +80,7 @@ MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 -static const char *amdgpu_asic_name[] = { +const char *amdgpu_asic_name[] = { "TAHITI", "PITCAIRN", "VERDE", @@ -151,6 +153,36 @@ bool amdgpu_device_is_px(struct drm_device *dev) return false; } +/** + * VRAM access helper functions. 
+ * + * amdgpu_device_vram_access - read/write a buffer in vram + * + * @adev: amdgpu_device pointer + * @pos: offset of the buffer in vram + * @buf: virtual address of the buffer in system memory + * @size: read/write size, sizeof(@buf) must be > @size + * @write: true - write to vram, otherwise - read from vram + */ +void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, + uint32_t *buf, size_t size, bool write) +{ + uint64_t last; + unsigned long flags; + + last = size - 4; + for (last += pos; pos <= last; pos += 4) { + spin_lock_irqsave(&adev->mmio_idx_lock, flags); + WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000); + WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31); + if (write) + WREG32_NO_KIQ(mmMM_DATA, *buf++); + else + *buf++ = RREG32_NO_KIQ(mmMM_DATA); + spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + } +} + /* * MMIO register access helper functions. */ @@ -1023,12 +1055,6 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev) amdgpu_device_check_block_size(adev); - ret = amdgpu_device_get_job_timeout_settings(adev); - if (ret) { - dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); - return ret; - } - adev->firmware.load_type = amdgpu_ucode_get_load_type(adev, amdgpu_fw_load_type); return ret; @@ -1469,6 +1495,9 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) (const struct gpu_info_firmware_v1_0 *)(adev->firmware.gpu_info_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) + goto parse_soc_bounding_box; + adev->gfx.config.max_shader_engines = le32_to_cpu(gpu_info_fw->gc_num_se); adev->gfx.config.max_cu_per_sh = le32_to_cpu(gpu_info_fw->gc_num_cu_per_sh); adev->gfx.config.max_sh_per_se = le32_to_cpu(gpu_info_fw->gc_num_sh_per_se); @@ -1496,7 +1525,13 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) adev->gfx.config.num_packer_per_sc = le32_to_cpu(gpu_info_fw->num_packer_per_sc); } + +parse_soc_bounding_box: #ifdef CONFIG_DRM_AMD_DC_DCN2_0 + /* + * soc bounding box info is not integrated in the discovery table, + * we always need to parse it from gpu info firmware. + */ if (hdr->version_minor == 2) { const struct gpu_info_firmware_v1_2 *gpu_info_fw = (const struct gpu_info_firmware_v1_2 *)(adev->firmware.gpu_info_fw->data + @@ -1613,6 +1648,9 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) if (r) return r; + if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10) + amdgpu_discovery_get_gfx_info(adev); + amdgpu_amdkfd_device_probe(adev); if (amdgpu_sriov_vf(adev)) { @@ -1622,7 +1660,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) } adev->pm.pp_feature = amdgpu_pp_feature_mask; - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev) || sched_policy == KFD_SCHED_POLICY_NO_HWS) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; for (i = 0; i < adev->num_ip_blocks; i++) { @@ -1839,6 +1877,19 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) if (r) goto init_failed; + /* + * retired pages will be loaded from eeprom and reserved here, + * it should be called after amdgpu_device_ip_hw_init_phase2 since + * for some ASICs the RAS EEPROM code relies on SMU fully functioning + * for I2C communication, which is only true at this point. + * recovery_init may fail, but it can free all resources allocated by + * itself and its failure should not stop amdgpu init process. 
+ * + * Note: theoretically, this should be called before all vram allocations + * to protect retired pages from being misused + */ + amdgpu_ras_recovery_init(adev); + if (adev->gmc.xgmi.num_physical_nodes > 1) amdgpu_xgmi_add_device(adev); amdgpu_amdkfd_device_init(adev); @@ -2006,6 +2057,7 @@ out: */ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) { + struct amdgpu_gpu_instance *gpu_instance; int i = 0, r; for (i = 0; i < adev->num_ip_blocks; i++) { @@ -2031,8 +2083,39 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) if (r) DRM_ERROR("enable mgpu fan boost failed (%d).\n", r); - /* set to low pstate by default */ - amdgpu_xgmi_set_pstate(adev, 0); + + if (adev->gmc.xgmi.num_physical_nodes > 1) { + mutex_lock(&mgpu_info.mutex); + + /* + * Reset the device p-state to low, as it was booted in the high p-state. + * + * This should be performed only after all devices from the same + * hive get initialized. + * + * However, the number of devices in the hive is not known in advance; + * they are counted one by one as the devices initialize. + * + * So we wait until all XGMI interlinked devices are initialized. + * This may bring some delays as those devices may come from + * different hives. But that should be OK. + */ + if (mgpu_info.num_dgpu == adev->gmc.xgmi.num_physical_nodes) { + for (i = 0; i < mgpu_info.num_gpu; i++) { + gpu_instance = &(mgpu_info.gpu_ins[i]); + if (gpu_instance->adev->flags & AMD_IS_APU) + continue; + + r = amdgpu_xgmi_set_pstate(gpu_instance->adev, 0); + if (r) { + DRM_ERROR("pstate setting failed (%d).\n", r); + break; + } + } + } + + mutex_unlock(&mgpu_info.mutex); + } return 0; } @@ -2220,6 +2303,12 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) /* displays are handled in phase1 */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) continue; + /* PSP lost connection when err_event_athub occurs */ + if (amdgpu_ras_intr_triggered() && + adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_PSP) { + adev->ip_blocks[i].status.hw = false; + continue; + } /* XXX handle errors */ r = adev->ip_blocks[i].version->funcs->suspend(adev); /* XXX handle errors */ @@ -2231,17 +2320,17 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) /* handle putting the SMC in the appropriate state */ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { if (is_support_sw_smu(adev)) { - /* todo */ + r = smu_set_mp1_state(&adev->smu, adev->mp1_state); } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_mp1_state) { r = adev->powerplay.pp_funcs->set_mp1_state( adev->powerplay.pp_handle, adev->mp1_state); - if (r) { - DRM_ERROR("SMC failed to set mp1 state %d, %d\n", - adev->mp1_state, r); - return r; - } + } + if (r) { + DRM_ERROR("SMC failed to set mp1 state %d, %d\n", + adev->mp1_state, r); + return r; } } @@ -2556,6 +2645,73 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) adev->asic_reset_res, adev->ddev->unique); } +static int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) +{ + char *input = amdgpu_lockup_timeout; + char *timeout_setting = NULL; + int index = 0; + long timeout; + int ret = 0; + + /* + * By default, the timeout for non-compute jobs is 10000. + * And there is no timeout enforced on compute jobs. + * In SR-IOV or passthrough mode, the timeout for compute + * jobs is 10000 by default. 
+ */ + adev->gfx_timeout = msecs_to_jiffies(10000); + adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; + if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) + adev->compute_timeout = adev->gfx_timeout; + else + adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; + + if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { + while ((timeout_setting = strsep(&input, ",")) && + strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENGTH)) { + ret = kstrtol(timeout_setting, 0, &timeout); + if (ret) + return ret; + + if (timeout == 0) { + index++; + continue; + } else if (timeout < 0) { + timeout = MAX_SCHEDULE_TIMEOUT; + } else { + timeout = msecs_to_jiffies(timeout); + } + + switch (index++) { + case 0: + adev->gfx_timeout = timeout; + break; + case 1: + adev->compute_timeout = timeout; + break; + case 2: + adev->sdma_timeout = timeout; + break; + case 3: + adev->video_timeout = timeout; + break; + default: + break; + } + } + /* + * There is only one value specified and + * it should apply to all non-compute jobs. + */ + if (index == 1) { + adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; + if (amdgpu_sriov_vf(adev) || amdgpu_passthrough(adev)) + adev->compute_timeout = adev->gfx_timeout; + } + } + + return ret; +} /** * amdgpu_device_init - initialize the driver @@ -2583,7 +2739,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, adev->ddev = ddev; adev->pdev = pdev; adev->flags = flags; - adev->asic_type = flags & AMD_ASIC_MASK; + + if (amdgpu_force_asic_type >= 0 && amdgpu_force_asic_type < CHIP_LAST) + adev->asic_type = amdgpu_force_asic_type; + else + adev->asic_type = flags & AMD_ASIC_MASK; + adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT; if (amdgpu_emu_mode == 1) adev->usec_timeout *= 2; @@ -2726,6 +2887,12 @@ int amdgpu_device_init(struct amdgpu_device *adev, if (r) return r; + r = amdgpu_device_get_job_timeout_settings(adev); + if (r) { + dev_err(adev->dev, "invalid lockup_timeout parameter syntax\n"); + return r; + } + /* doorbell bar mapping and doorbell index init*/ amdgpu_device_doorbell_init(adev); @@ -2942,7 +3109,9 @@ void amdgpu_device_fini(struct amdgpu_device *adev) int r; DRM_INFO("amdgpu: finishing device.\n"); + flush_delayed_work(&adev->delayed_init_work); adev->shutdown = true; + /* disable all interrupts */ amdgpu_irq_disable_all(adev); if (adev->mode_info.mode_config_initialized){ @@ -2960,7 +3129,6 @@ void amdgpu_device_fini(struct amdgpu_device *adev) adev->firmware.gpu_info_fw = NULL; } adev->accel_working = false; - cancel_delayed_work_sync(&adev->delayed_init_work); /* free i2c buses */ if (!amdgpu_device_has_dc_support(adev)) amdgpu_i2c_fini(adev); @@ -3014,6 +3182,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) struct amdgpu_device *adev; struct drm_crtc *crtc; struct drm_connector *connector; + struct drm_connector_list_iter iter; int r; if (dev == NULL || dev->dev_private == NULL) { @@ -3036,9 +3205,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) if (!amdgpu_device_has_dc_support(adev)) { /* turn off display hw */ drm_modeset_lock_all(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); - } + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) + drm_helper_connector_dpms(connector, + DRM_MODE_DPMS_OFF); + drm_connector_list_iter_end(&iter); drm_modeset_unlock_all(dev); /* unpin the front buffers and cursors */ list_for_each_entry(crtc, 
&dev->mode_config.crtc_list, head) { @@ -3089,15 +3260,11 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) */ amdgpu_bo_evict_vram(adev); - pci_save_state(dev->pdev); if (suspend) { + pci_save_state(dev->pdev); /* Shut down the device */ pci_disable_device(dev->pdev); pci_set_power_state(dev->pdev, PCI_D3hot); - } else { - r = amdgpu_asic_reset(adev); - if (r) - DRM_ERROR("amdgpu asic reset failed\n"); } return 0; @@ -3117,6 +3284,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon) int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) { struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_device *adev = dev->dev_private; struct drm_crtc *crtc; int r = 0; @@ -3187,9 +3355,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon) /* turn on display hw */ drm_modeset_lock_all(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); - } + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) + drm_helper_connector_dpms(connector, + DRM_MODE_DPMS_ON); + drm_connector_list_iter_end(&iter); + drm_modeset_unlock_all(dev); } amdgpu_fbdev_set_suspend(adev, 0); @@ -3635,11 +3807,6 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive, break; } } - - list_for_each_entry(tmp_adev, device_list_handle, - gmc.xgmi.head) { - amdgpu_ras_reserve_bad_pages(tmp_adev); - } } } @@ -3743,25 +3910,18 @@ static bool amdgpu_device_lock_adev(struct amdgpu_device *adev, bool trylock) adev->mp1_state = PP_MP1_STATE_NONE; break; } - /* Block kfd: SRIOV would do it separately */ - if (!amdgpu_sriov_vf(adev)) - amdgpu_amdkfd_pre_reset(adev); return true; } static void amdgpu_device_unlock_adev(struct amdgpu_device *adev) { - /*unlock kfd: SRIOV would do it separately */ - if (!amdgpu_sriov_vf(adev)) - amdgpu_amdkfd_post_reset(adev); amdgpu_vf_error_trans_all(adev); adev->mp1_state = PP_MP1_STATE_NONE; adev->in_gpu_reset = 0; mutex_unlock(&adev->lock_reset); } - /** * amdgpu_device_gpu_recover - reset the asic and recover scheduler * @@ -3781,11 +3941,24 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, struct amdgpu_hive_info *hive = NULL; struct amdgpu_device *tmp_adev = NULL; int i, r = 0; + bool in_ras_intr = amdgpu_ras_intr_triggered(); + + /* + * Flush RAM to disk so that after reboot + * the user can read log and see why the system rebooted. + */ + if (in_ras_intr && amdgpu_ras_get_context(adev)->reboot) { + + DRM_WARN("Emergency reboot."); + + ksys_sync_helper(); + emergency_restart(); + } need_full_reset = job_signaled = false; INIT_LIST_HEAD(&device_list); - dev_info(adev->dev, "GPU reset begin!\n"); + dev_info(adev->dev, "GPU %s begin!\n", in_ras_intr ? 
"jobs stop":"reset"); cancel_delayed_work_sync(&adev->delayed_init_work); @@ -3812,9 +3985,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, return 0; } + /* Block kfd: SRIOV would do it separately */ + if (!amdgpu_sriov_vf(adev)) + amdgpu_amdkfd_pre_reset(adev); + /* Build list of devices to reset */ if (adev->gmc.xgmi.num_physical_nodes > 1) { if (!hive) { + /*unlock kfd: SRIOV would do it separately */ + if (!amdgpu_sriov_vf(adev)) + amdgpu_amdkfd_post_reset(adev); amdgpu_device_unlock_adev(adev); return -ENODEV; } @@ -3830,17 +4010,22 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, device_list_handle = &device_list; } - /* - * Mark these ASICs to be reseted as untracked first - * And add them back after reset completed - */ - list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) - amdgpu_unregister_gpu_instance(tmp_adev); - /* block all schedulers and reset given job's ring */ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + if (tmp_adev != adev) { + amdgpu_device_lock_adev(tmp_adev, false); + if (!amdgpu_sriov_vf(tmp_adev)) + amdgpu_amdkfd_pre_reset(tmp_adev); + } + + /* + * Mark these ASICs to be reseted as untracked first + * And add them back after reset completed + */ + amdgpu_unregister_gpu_instance(tmp_adev); + /* disable ras on ALL IPs */ - if (amdgpu_device_ip_need_full_reset(tmp_adev)) + if (!in_ras_intr && amdgpu_device_ip_need_full_reset(tmp_adev)) amdgpu_ras_suspend(tmp_adev); for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { @@ -3850,10 +4035,16 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, continue; drm_sched_stop(&ring->sched, job ? &job->base : NULL); + + if (in_ras_intr) + amdgpu_job_stop_all_jobs_on_sched(&ring->sched); } } + if (in_ras_intr) + goto skip_sched_resume; + /* * Must check guilty signal here since after this point all old * HW fences are force signaled. @@ -3864,9 +4055,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, dma_fence_is_signaled(job->base.s_fence->parent)) job_signaled = true; - if (!amdgpu_device_ip_need_full_reset(adev)) - device_list_handle = &device_list; - if (job_signaled) { dev_info(adev->dev, "Guilty job already signaled, skipping HW reset"); goto skip_hw_reset; @@ -3888,7 +4076,6 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ if (tmp_adev == adev) continue; - amdgpu_device_lock_adev(tmp_adev, false); r = amdgpu_device_pre_asic_reset(tmp_adev, NULL, &need_full_reset); @@ -3916,6 +4103,7 @@ skip_hw_reset: /* Post ASIC reset for all devs .*/ list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) { struct amdgpu_ring *ring = tmp_adev->rings[i]; @@ -3937,12 +4125,18 @@ skip_hw_reset: if (r) { /* bad news, how to tell it to userspace ? 
*/ - dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter)); + dev_info(tmp_adev->dev, "GPU reset(%d) failed\n", atomic_read(&tmp_adev->gpu_reset_counter)); amdgpu_vf_error_put(tmp_adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r); } else { - dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter)); + dev_info(tmp_adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&tmp_adev->gpu_reset_counter)); } + } +skip_sched_resume: + list_for_each_entry(tmp_adev, device_list_handle, gmc.xgmi.head) { + /*unlock kfd: SRIOV would do it separately */ + if (!in_ras_intr && !amdgpu_sriov_vf(tmp_adev)) + amdgpu_amdkfd_post_reset(tmp_adev); amdgpu_device_unlock_adev(tmp_adev); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 1481899f86c1..f95092741c38 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -134,20 +134,10 @@ static int hw_id_map[MAX_HWIP] = { static int amdgpu_discovery_read_binary(struct amdgpu_device *adev, uint8_t *binary) { - uint32_t *p = (uint32_t *)binary; uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20; - uint64_t pos = vram_size - BINARY_MAX_SIZE; - unsigned long flags; - - while (pos < vram_size) { - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31); - *p++ = RREG32_NO_KIQ(mmMM_DATA); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - pos += 4; - } + uint64_t pos = vram_size - DISCOVERY_TMR_SIZE; + amdgpu_device_vram_access(adev, pos, (uint32_t *)binary, DISCOVERY_TMR_SIZE, false); return 0; } @@ -179,7 +169,7 @@ int amdgpu_discovery_init(struct amdgpu_device *adev) uint16_t checksum; int r; - adev->discovery = kzalloc(BINARY_MAX_SIZE, GFP_KERNEL); + adev->discovery = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL); if (!adev->discovery) return -ENOMEM; @@ -333,7 +323,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev) } int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, - int *major, int *minor) + int *major, int *minor, int *revision) { struct binary_header *bhdr; struct ip_discovery_header *ihdr; @@ -369,6 +359,8 @@ int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, *major = ip->major; if (minor) *minor = ip->minor; + if (revision) + *revision = ip->revision; return 0; } ip_offset += sizeof(*ip) + 4 * (ip->num_base_address - 1); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h index 85b8c4d4d576..ba78e15d9b05 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h @@ -24,11 +24,13 @@ #ifndef __AMDGPU_DISCOVERY__ #define __AMDGPU_DISCOVERY__ +#define DISCOVERY_TMR_SIZE (64 << 10) + int amdgpu_discovery_init(struct amdgpu_device *adev); void amdgpu_discovery_fini(struct amdgpu_device *adev); int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev); int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, - int *major, int *minor); + int *major, int *minor, int *revision); int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev); #endif /* __AMDGPU_DISCOVERY__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 82efc1e22e61..3cadb0b76f22 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -370,11 +370,13 @@ void amdgpu_display_print_display_setup(struct drm_device *dev) struct amdgpu_connector *amdgpu_connector; struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; + struct drm_connector_list_iter iter; uint32_t devices; int i = 0; + drm_connector_list_iter_begin(dev, &iter); DRM_INFO("AMDGPU Display Connectors\n"); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); DRM_INFO("Connector %d:\n", i); DRM_INFO(" %s\n", connector->name); @@ -438,6 +440,7 @@ void amdgpu_display_print_display_setup(struct drm_device *dev) } i++; } + drm_connector_list_iter_end(&iter); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index 61f108ec2b5c..e2eec7b66334 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -34,27 +34,12 @@ #include "amdgpu.h" #include "amdgpu_display.h" #include "amdgpu_gem.h" +#include "amdgpu_dma_buf.h" #include <drm/amdgpu_drm.h> #include <linux/dma-buf.h> #include <linux/dma-fence-array.h> /** - * amdgpu_gem_prime_get_sg_table - &drm_driver.gem_prime_get_sg_table - * implementation - * @obj: GEM buffer object (BO) - * - * Returns: - * A scatter/gather table for the pinned pages of the BO's memory. - */ -struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) -{ - struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - int npages = bo->tbo.num_pages; - - return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages); -} - -/** * amdgpu_gem_prime_vmap - &dma_buf_ops.vmap implementation * @obj: GEM BO * @@ -179,92 +164,126 @@ err_fences_put: } /** - * amdgpu_dma_buf_map_attach - &dma_buf_ops.attach implementation - * @dma_buf: Shared DMA buffer + * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation + * + * @dmabuf: DMA-buf where we attach to + * @attach: attachment to add + * + * Add the attachment as user to the exported DMA-buf. + */ +static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = dmabuf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + int r; + + if (attach->dev->driver == adev->dev->driver) + return 0; + + r = amdgpu_bo_reserve(bo, false); + if (unlikely(r != 0)) + return r; + + /* + * We only create shared fences for internal use, but importers + * of the dmabuf rely on exclusive fences for implicitly + * tracking write hazards. As any of the current fences may + * correspond to a write, we need to convert all existing + * fences on the reservation object into a single exclusive + * fence. + */ + r = __dma_resv_make_exclusive(bo->tbo.base.resv); + if (r) + return r; + + bo->prime_shared_count++; + amdgpu_bo_unreserve(bo); + return 0; +} + +/** + * amdgpu_dma_buf_detach - &dma_buf_ops.detach implementation + * + * @dmabuf: DMA-buf where we remove the attachment from + * @attach: the attachment to remove + * + * Called when an attachment is removed from the DMA-buf. 
+ */ +static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attach) +{ + struct drm_gem_object *obj = dmabuf->priv; + struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + + if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count) + bo->prime_shared_count--; +} + +/** + * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation * @attach: DMA-buf attachment + * @dir: DMA direction * * Makes sure that the shared DMA buffer can be accessed by the target device. * For now, simply pins it to the GTT domain, where it should be accessible by * all DMA devices. * * Returns: - * 0 on success or a negative error code on failure. + * sg_table filled with the DMA addresses to use or ERR_PRT with negative error + * code. */ -static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf, - struct dma_buf_attachment *attach) +static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach, + enum dma_data_direction dir) { + struct dma_buf *dma_buf = attach->dmabuf; struct drm_gem_object *obj = dma_buf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct sg_table *sgt; long r; - r = drm_gem_map_attach(dma_buf, attach); - if (r) - return r; - - r = amdgpu_bo_reserve(bo, false); - if (unlikely(r != 0)) - goto error_detach; - - - if (attach->dev->driver != adev->dev->driver) { - /* - * We only create shared fences for internal use, but importers - * of the dmabuf rely on exclusive fences for implicitly - * tracking write hazards. As any of the current fences may - * correspond to a write, we need to convert all existing - * fences on the reservation object into a single exclusive - * fence. - */ - r = __dma_resv_make_exclusive(bo->tbo.base.resv); - if (r) - goto error_unreserve; - } - - /* pin buffer into GTT */ r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT); if (r) - goto error_unreserve; + return ERR_PTR(r); - if (attach->dev->driver != adev->dev->driver) - bo->prime_shared_count++; + sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages); + if (IS_ERR(sgt)) + return sgt; -error_unreserve: - amdgpu_bo_unreserve(bo); + if (!dma_map_sg_attrs(attach->dev, sgt->sgl, sgt->nents, dir, + DMA_ATTR_SKIP_CPU_SYNC)) + goto error_free; -error_detach: - if (r) - drm_gem_map_detach(dma_buf, attach); - return r; + return sgt; + +error_free: + sg_free_table(sgt); + kfree(sgt); + return ERR_PTR(-ENOMEM); } /** - * amdgpu_dma_buf_map_detach - &dma_buf_ops.detach implementation - * @dma_buf: Shared DMA buffer + * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation * @attach: DMA-buf attachment + * @sgt: sg_table to unmap + * @dir: DMA direction * * This is called when a shared DMA buffer no longer needs to be accessible by * another device. For now, simply unpins the buffer from GTT. 
*/ -static void amdgpu_dma_buf_map_detach(struct dma_buf *dma_buf, - struct dma_buf_attachment *attach) +static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir) { - struct drm_gem_object *obj = dma_buf->priv; + struct drm_gem_object *obj = attach->dmabuf->priv; struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - int ret = 0; - - ret = amdgpu_bo_reserve(bo, true); - if (unlikely(ret != 0)) - goto error; + dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir); + sg_free_table(sgt); + kfree(sgt); amdgpu_bo_unpin(bo); - if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count) - bo->prime_shared_count--; - amdgpu_bo_unreserve(bo); - -error: - drm_gem_map_detach(dma_buf, attach); } /** @@ -308,10 +327,11 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf, } const struct dma_buf_ops amdgpu_dmabuf_ops = { - .attach = amdgpu_dma_buf_map_attach, - .detach = amdgpu_dma_buf_map_detach, - .map_dma_buf = drm_gem_map_dma_buf, - .unmap_dma_buf = drm_gem_unmap_dma_buf, + .dynamic_mapping = true, + .attach = amdgpu_dma_buf_attach, + .detach = amdgpu_dma_buf_detach, + .map_dma_buf = amdgpu_dma_buf_map, + .unmap_dma_buf = amdgpu_dma_buf_unmap, .release = drm_gem_dmabuf_release, .begin_cpu_access = amdgpu_dma_buf_begin_cpu_access, .mmap = drm_gem_dmabuf_mmap, @@ -321,7 +341,6 @@ const struct dma_buf_ops amdgpu_dmabuf_ops = { /** * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation - * @dev: DRM device * @gobj: GEM BO * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR. * @@ -350,31 +369,28 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj, } /** - * amdgpu_gem_prime_import_sg_table - &drm_driver.gem_prime_import_sg_table - * implementation + * amdgpu_dma_buf_create_obj - create BO for DMA-buf import + * * @dev: DRM device - * @attach: DMA-buf attachment - * @sg: Scatter/gather table + * @dma_buf: DMA-buf * - * Imports shared DMA buffer memory exported by another device. + * Creates an empty SG BO for DMA-buf import. * * Returns: * A new GEM BO of the given DRM device, representing the memory * described by the given DMA-buf attachment and scatter/gather table. */ -struct drm_gem_object * -amdgpu_gem_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, - struct sg_table *sg) +static struct drm_gem_object * +amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf) { - struct dma_resv *resv = attach->dmabuf->resv; + struct dma_resv *resv = dma_buf->resv; struct amdgpu_device *adev = dev->dev_private; struct amdgpu_bo *bo; struct amdgpu_bo_param bp; int ret; memset(&bp, 0, sizeof(bp)); - bp.size = attach->dmabuf->size; + bp.size = dma_buf->size; bp.byte_align = PAGE_SIZE; bp.domain = AMDGPU_GEM_DOMAIN_CPU; bp.flags = 0; @@ -385,11 +401,9 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev, if (ret) goto error; - bo->tbo.sg = sg; - bo->tbo.ttm->sg = sg; bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT; bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT; - if (attach->dmabuf->ops != &amdgpu_dmabuf_ops) + if (dma_buf->ops != &amdgpu_dmabuf_ops) bo->prime_shared_count = 1; dma_resv_unlock(resv); @@ -405,15 +419,15 @@ error: * @dev: DRM device * @dma_buf: Shared DMA buffer * - * The main work is done by the &drm_gem_prime_import helper, which in turn - * uses &amdgpu_gem_prime_import_sg_table. + * Import a dma_buf into a the driver and potentially create a new GEM object. 
* * Returns: * GEM BO representing the shared DMA buffer for the given device. */ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, - struct dma_buf *dma_buf) + struct dma_buf *dma_buf) { + struct dma_buf_attachment *attach; struct drm_gem_object *obj; if (dma_buf->ops == &amdgpu_dmabuf_ops) { @@ -428,5 +442,17 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, } } - return drm_gem_prime_import(dev, dma_buf); + obj = amdgpu_dma_buf_create_obj(dev, dma_buf); + if (IS_ERR(obj)) + return obj; + + attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true); + if (IS_ERR(attach)) { + drm_gem_object_put(obj); + return ERR_CAST(attach); + } + + get_dma_buf(dma_buf); + obj->import_attach = attach; + return obj; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h index 5012e6ab58f1..ec447a7b6b28 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.h @@ -25,11 +25,6 @@ #include <drm/drm_gem.h> -struct sg_table *amdgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); -struct drm_gem_object * -amdgpu_gem_prime_import_sg_table(struct drm_device *dev, - struct dma_buf_attachment *attach, - struct sg_table *sg); struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj, int flags); struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c index 5803fcbae22f..9cc270efee7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c @@ -911,7 +911,8 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) if (is_support_sw_smu(adev)) { ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK, low ? &clk_freq : NULL, - !low ? &clk_freq : NULL); + !low ? &clk_freq : NULL, + true); if (ret) return 0; return clk_freq * 100; @@ -928,7 +929,8 @@ int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) if (is_support_sw_smu(adev)) { ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK, low ? &clk_freq : NULL, - !low ? &clk_freq : NULL); + !low ? &clk_freq : NULL, + true); if (ret) return 0; return clk_freq * 100; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h index 1c5c0fd76dbf..2cfb677272af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h @@ -298,12 +298,6 @@ enum amdgpu_pcie_gen { #define amdgpu_dpm_get_current_power_state(adev) \ ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)) -#define amdgpu_smu_get_current_power_state(adev) \ - ((adev)->smu.ppt_funcs->get_current_power_state(&((adev)->smu))) - -#define amdgpu_smu_set_power_state(adev) \ - ((adev)->smu.ppt_funcs->set_power_state(&((adev)->smu))) - #define amdgpu_dpm_get_pp_num_states(adev, data) \ ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index b19157b19fa0..0ffc9447b573 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -43,6 +43,8 @@ #include "amdgpu_amdkfd.h" +#include "amdgpu_ras.h" + /* * KMS wrapper. * - 3.0.0 - initial driver @@ -82,13 +84,12 @@ * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS. 
* - 3.34.0 - Non-DC can flip correctly between buffers with different pitches * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask + * - 3.36.0 - Allow reading more status registers on si/cik */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 35 +#define KMS_DRIVER_MINOR 36 #define KMS_DRIVER_PATCHLEVEL 0 -#define AMDGPU_MAX_TIMEOUT_PARAM_LENTH 256 - int amdgpu_vram_limit = 0; int amdgpu_vis_vram_limit = 0; int amdgpu_gart_size = -1; /* auto */ @@ -101,7 +102,7 @@ int amdgpu_disp_priority = 0; int amdgpu_hw_i2c = 0; int amdgpu_pcie_gen2 = -1; int amdgpu_msi = -1; -char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENTH]; +char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH]; int amdgpu_dpm = -1; int amdgpu_fw_load_type = -1; int amdgpu_aspm = -1; @@ -128,11 +129,7 @@ char *amdgpu_disable_cu = NULL; char *amdgpu_virtual_display = NULL; /* OverDrive(bit 14) disabled by default*/ uint amdgpu_pp_feature_mask = 0xffffbfff; -int amdgpu_ngg = 0; -int amdgpu_prim_buf_per_se = 0; -int amdgpu_pos_buf_per_se = 0; -int amdgpu_cntl_sb_buf_per_se = 0; -int amdgpu_param_buf_per_se = 0; +uint amdgpu_force_long_training = 0; int amdgpu_job_hang_limit = 0; int amdgpu_lbpw = -1; int amdgpu_compute_multipipe = -1; @@ -146,12 +143,13 @@ int amdgpu_mcbp = 0; int amdgpu_discovery = -1; int amdgpu_mes = 0; int amdgpu_noretry = 1; +int amdgpu_force_asic_type = -1; struct amdgpu_mgpu_info mgpu_info = { .mutex = __MUTEX_INITIALIZER(mgpu_info.mutex), }; int amdgpu_ras_enable = -1; -uint amdgpu_ras_mask = 0xfffffffb; +uint amdgpu_ras_mask = 0xffffffff; /** * DOC: vramlimit (int) @@ -244,16 +242,21 @@ module_param_named(msi, amdgpu_msi, int, 0444); * * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or * multiple values specified. 0 and negative values are invalidated. They will be adjusted - * to default timeout. - * - With one value specified, the setting will apply to all non-compute jobs. - * - With multiple values specified, the first one will be for GFX. The second one is for Compute. - * And the third and fourth ones are for SDMA and Video. + * to the default timeout. + * + * - With one value specified, the setting will apply to all non-compute jobs. + * - With multiple values specified, the first one will be for GFX. + * The second one is for Compute. The third and fourth ones are + * for SDMA and Video. + * * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video) * jobs is 10000. And there is no timeout enforced on compute jobs. */ -MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: 10000 for non-compute jobs and infinity timeout for compute jobs." +MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and infinity timeout for compute jobs; " + "for passthrough or sriov, 10000 for all jobs." " 0: keep default value. negative: infinity timeout), " - "format is [Non-Compute] or [GFX,Compute,SDMA,Video]"); + "format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; " + "for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video]."); module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444); /** @@ -392,6 +395,14 @@ MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))"); module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, uint, 0444); /** + * DOC: forcelongtraining (uint) + * Force long memory training in resume. 
+ * The default is zero, indicates short training in resume. + */ +MODULE_PARM_DESC(forcelongtraining, "force memory long training"); +module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444); + +/** * DOC: pcie_gen_cap (uint) * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h. * The default is 0 (automatic for each asic). @@ -449,42 +460,6 @@ MODULE_PARM_DESC(virtual_display, module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444); /** - * DOC: ngg (int) - * Set to enable Next Generation Graphics (1 = enable). The default is 0 (disabled). - */ -MODULE_PARM_DESC(ngg, "Next Generation Graphics (1 = enable, 0 = disable(default depending on gfx))"); -module_param_named(ngg, amdgpu_ngg, int, 0444); - -/** - * DOC: prim_buf_per_se (int) - * Override the size of Primitive Buffer per Shader Engine in Byte. The default is 0 (depending on gfx). - */ -MODULE_PARM_DESC(prim_buf_per_se, "the size of Primitive Buffer per Shader Engine (default depending on gfx)"); -module_param_named(prim_buf_per_se, amdgpu_prim_buf_per_se, int, 0444); - -/** - * DOC: pos_buf_per_se (int) - * Override the size of Position Buffer per Shader Engine in Byte. The default is 0 (depending on gfx). - */ -MODULE_PARM_DESC(pos_buf_per_se, "the size of Position Buffer per Shader Engine (default depending on gfx)"); -module_param_named(pos_buf_per_se, amdgpu_pos_buf_per_se, int, 0444); - -/** - * DOC: cntl_sb_buf_per_se (int) - * Override the size of Control Sideband per Shader Engine in Byte. The default is 0 (depending on gfx). - */ -MODULE_PARM_DESC(cntl_sb_buf_per_se, "the size of Control Sideband per Shader Engine (default depending on gfx)"); -module_param_named(cntl_sb_buf_per_se, amdgpu_cntl_sb_buf_per_se, int, 0444); - -/** - * DOC: param_buf_per_se (int) - * Override the size of Off-Chip Parameter Cache per Shader Engine in Byte. - * The default is 0 (depending on gfx). - */ -MODULE_PARM_DESC(param_buf_per_se, "the size of Off-Chip Parameter Cache per Shader Engine (default depending on gfx)"); -module_param_named(param_buf_per_se, amdgpu_param_buf_per_se, int, 0444); - -/** * DOC: job_hang_limit (int) * Set how much time allow a job hang and not drop it. The default is 0. */ @@ -616,6 +591,16 @@ MODULE_PARM_DESC(noretry, "Disable retry faults (0 = retry enabled, 1 = retry disabled (default))"); module_param_named(noretry, amdgpu_noretry, int, 0644); +/** + * DOC: force_asic_type (int) + * A non negative value used to specify the asic type for all supported GPUs. 
+ */ +MODULE_PARM_DESC(force_asic_type, + "A non negative value used to specify the asic type for all supported GPUs"); +module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444); + + + #ifdef CONFIG_HSA_AMD /** * DOC: sched_policy (int) @@ -1023,6 +1008,7 @@ static const struct pci_device_id pciidlist[] = { /* Navi12 */ {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, {0, 0, 0} }; @@ -1085,7 +1071,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, #endif /* Get rid of things like offb */ - ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb"); + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb"); if (ret) return ret; @@ -1128,7 +1114,10 @@ amdgpu_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); - DRM_ERROR("Device removal is currently not supported outside of fbcon\n"); +#ifdef MODULE + if (THIS_MODULE->state != MODULE_STATE_GOING) +#endif + DRM_ERROR("Hotplug removal is not supported\n"); drm_dev_unplug(dev); drm_dev_put(dev); pci_disable_device(pdev); @@ -1141,6 +1130,9 @@ amdgpu_pci_shutdown(struct pci_dev *pdev) struct drm_device *dev = pci_get_drvdata(pdev); struct amdgpu_device *adev = dev->dev_private; + if (amdgpu_ras_intr_triggered()) + return; + /* if we are running in a VM, make sure the device * torn down properly on reboot/shutdown. * unfortunately we can't detect certain @@ -1175,8 +1167,13 @@ static int amdgpu_pmops_resume(struct device *dev) static int amdgpu_pmops_freeze(struct device *dev) { struct drm_device *drm_dev = dev_get_drvdata(dev); + struct amdgpu_device *adev = drm_dev->dev_private; + int r; - return amdgpu_device_suspend(drm_dev, false, true); + r = amdgpu_device_suspend(drm_dev, false, true); + if (r) + return r; + return amdgpu_asic_reset(adev); } static int amdgpu_pmops_thaw(struct device *dev) @@ -1348,66 +1345,6 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv) return 0; } -int amdgpu_device_get_job_timeout_settings(struct amdgpu_device *adev) -{ - char *input = amdgpu_lockup_timeout; - char *timeout_setting = NULL; - int index = 0; - long timeout; - int ret = 0; - - /* - * By default timeout for non compute jobs is 10000. - * And there is no timeout enforced on compute jobs. - */ - adev->gfx_timeout = msecs_to_jiffies(10000); - adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; - adev->compute_timeout = MAX_SCHEDULE_TIMEOUT; - - if (strnlen(input, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) { - while ((timeout_setting = strsep(&input, ",")) && - strnlen(timeout_setting, AMDGPU_MAX_TIMEOUT_PARAM_LENTH)) { - ret = kstrtol(timeout_setting, 0, &timeout); - if (ret) - return ret; - - if (timeout == 0) { - index++; - continue; - } else if (timeout < 0) { - timeout = MAX_SCHEDULE_TIMEOUT; - } else { - timeout = msecs_to_jiffies(timeout); - } - - switch (index++) { - case 0: - adev->gfx_timeout = timeout; - break; - case 1: - adev->compute_timeout = timeout; - break; - case 2: - adev->sdma_timeout = timeout; - break; - case 3: - adev->video_timeout = timeout; - break; - default: - break; - } - } - /* - * There is only one value specified and - * it should apply to all non-compute jobs. 
- */ - if (index == 1) - adev->sdma_timeout = adev->video_timeout = adev->gfx_timeout; - } - - return ret; -} - static bool amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe, bool in_vblank_irq, int *vpos, int *hpos, @@ -1446,8 +1383,6 @@ static struct drm_driver kms_driver = { .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_export = amdgpu_gem_prime_export, .gem_prime_import = amdgpu_gem_prime_import, - .gem_prime_get_sg_table = amdgpu_gem_prime_get_sg_table, - .gem_prime_import_sg_table = amdgpu_gem_prime_import_sg_table, .gem_prime_vmap = amdgpu_gem_prime_vmap, .gem_prime_vunmap = amdgpu_gem_prime_vunmap, .gem_prime_mmap = amdgpu_gem_prime_mmap, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c index 571a6dfb473e..61fcf247a638 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_encoders.c @@ -37,12 +37,14 @@ amdgpu_link_encoder_connector(struct drm_device *dev) { struct amdgpu_device *adev = dev->dev_private; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; struct drm_encoder *encoder; struct amdgpu_encoder *amdgpu_encoder; + drm_connector_list_iter_begin(dev, &iter); /* walk the list and link encoders to connectors */ - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { amdgpu_encoder = to_amdgpu_encoder(encoder); @@ -55,6 +57,7 @@ amdgpu_link_encoder_connector(struct drm_device *dev) } } } + drm_connector_list_iter_end(&iter); } void amdgpu_encoder_set_active_device(struct drm_encoder *encoder) @@ -62,8 +65,10 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder) struct drm_device *dev = encoder->dev; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); amdgpu_encoder->active_device = amdgpu_encoder->devices & amdgpu_connector->devices; @@ -72,6 +77,7 @@ void amdgpu_encoder_set_active_device(struct drm_encoder *encoder) amdgpu_connector->devices, encoder->encoder_type); } } + drm_connector_list_iter_end(&iter); } struct drm_connector * @@ -79,15 +85,20 @@ amdgpu_get_connector_for_encoder(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct drm_connector *connector; + struct drm_connector *connector, *found = NULL; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); - if (amdgpu_encoder->active_device & amdgpu_connector->devices) - return connector; + if (amdgpu_encoder->active_device & amdgpu_connector->devices) { + found = connector; + break; + } } - return NULL; + drm_connector_list_iter_end(&iter); + return found; } struct drm_connector * @@ -95,15 +106,20 @@ 
amdgpu_get_connector_for_encoder_init(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); - struct drm_connector *connector; + struct drm_connector *connector, *found = NULL; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { amdgpu_connector = to_amdgpu_connector(connector); - if (amdgpu_encoder->devices & amdgpu_connector->devices) - return connector; + if (amdgpu_encoder->devices & amdgpu_connector->devices) { + found = connector; + break; + } } - return NULL; + drm_connector_list_iter_end(&iter); + return found; } struct drm_encoder *amdgpu_get_external_encoder(struct drm_encoder *encoder) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 23085b352cf2..377fe20bce23 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -462,18 +462,7 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring, timeout = adev->gfx_timeout; break; case AMDGPU_RING_TYPE_COMPUTE: - /* - * For non-sriov case, no timeout enforce - * on compute ring by default. Unless user - * specifies a timeout for compute ring. - * - * For sriov case, always use the timeout - * as gfx ring - */ - if (!amdgpu_sriov_vf(ring->adev)) - timeout = adev->compute_timeout; - else - timeout = adev->gfx_timeout; + timeout = adev->compute_timeout; break; case AMDGPU_RING_TYPE_SDMA: timeout = adev->sdma_timeout; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 5e8bdded265f..19705e399905 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -71,7 +71,7 @@ */ static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev) { - struct page *dummy_page = adev->mman.bdev.glob->dummy_read_page; + struct page *dummy_page = ttm_bo_glob.dummy_read_page; if (adev->dummy_page_addr) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 8ceb44925947..4277125a79ee 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -175,7 +175,7 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj, amdgpu_vm_get_pd_bo(vm, &list, &vm_pd); - r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates, false); + r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates); if (r) { dev_err(adev->dev, "leaking bo va because " "we fail to reserve bo (%d)\n", r); @@ -527,13 +527,41 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, goto error; } - r = amdgpu_vm_update_directories(adev, vm); + r = amdgpu_vm_update_pdes(adev, vm, false); error: if (r && r != -ERESTARTSYS) DRM_ERROR("Couldn't update BO_VA (%d)\n", r); } +/** + * amdgpu_gem_va_map_flags - map GEM UAPI flags into hardware flags + * + * @adev: amdgpu_device pointer + * @flags: GEM UAPI flags + * + * Returns the GEM UAPI flags mapped into hardware for the ASIC. 
+ */ +uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags) +{ + uint64_t pte_flag = 0; + + if (flags & AMDGPU_VM_PAGE_EXECUTABLE) + pte_flag |= AMDGPU_PTE_EXECUTABLE; + if (flags & AMDGPU_VM_PAGE_READABLE) + pte_flag |= AMDGPU_PTE_READABLE; + if (flags & AMDGPU_VM_PAGE_WRITEABLE) + pte_flag |= AMDGPU_PTE_WRITEABLE; + if (flags & AMDGPU_VM_PAGE_PRT) + pte_flag |= AMDGPU_PTE_PRT; + + if (adev->gmc.gmc_funcs->map_mtype) + pte_flag |= amdgpu_gmc_map_mtype(adev, + flags & AMDGPU_VM_MTYPE_MASK); + + return pte_flag; +} + int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { @@ -613,7 +641,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, amdgpu_vm_get_pd_bo(&fpriv->vm, &list, &vm_pd); - r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates, false); + r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates); if (r) goto error_unref; @@ -631,7 +659,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, switch (args->operation) { case AMDGPU_VA_OP_MAP: - va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags); + va_flags = amdgpu_gem_va_map_flags(adev, args->flags); r = amdgpu_vm_bo_map(adev, bo_va, args->va_address, args->offset_in_bo, args->map_size, va_flags); @@ -646,7 +674,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, args->map_size); break; case AMDGPU_VA_OP_REPLACE: - va_flags = amdgpu_gmc_get_pte_flags(adev, args->flags); + va_flags = amdgpu_gem_va_map_flags(adev, args->flags); r = amdgpu_vm_bo_replace_map(adev, bo_va, args->va_address, args->offset_in_bo, args->map_size, va_flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index 0b66d2e6b5d5..e0f025dd1b14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -67,6 +67,7 @@ int amdgpu_gem_mmap_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); +uint64_t amdgpu_gem_va_map_flags(struct amdgpu_device *adev, uint32_t flags); int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index f9bef3154b99..e00b46180d2e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "amdgpu_gfx.h" #include "amdgpu_rlc.h" +#include "amdgpu_ras.h" /* delay 0.1 second to enable gfx off feature */ #define GFX_OFF_DELAY_ENABLE msecs_to_jiffies(100) @@ -231,12 +232,10 @@ void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev) void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev) { - int i, queue, pipe, me; + int i, queue, me; for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) { queue = i % adev->gfx.me.num_queue_per_pipe; - pipe = (i / adev->gfx.me.num_queue_per_pipe) - % adev->gfx.me.num_pipe_per_me; me = (i / adev->gfx.me.num_queue_per_pipe) / adev->gfx.me.num_pipe_per_me; @@ -320,8 +319,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, return r; } -void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq) +void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring) { amdgpu_device_wb_free(ring->adev, ring->adev->virt.reg_val_offs); amdgpu_ring_fini(ring); @@ -456,8 +454,6 @@ void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev) } 
ring = &adev->gfx.kiq.ring; - if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) - kfree(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]); kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]); amdgpu_bo_free_kernel(&ring->mqd_obj, &ring->mqd_gpu_addr, @@ -569,3 +565,102 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) mutex_unlock(&adev->gfx.gfx_off_mutex); } + +int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_fs_if fs_info = { + .sysfs_name = "gfx_err_count", + .debugfs_name = "gfx_err_inject", + }; + struct ras_ih_if ih_info = { + .cb = amdgpu_gfx_process_ras_data_cb, + }; + + if (!adev->gfx.ras_if) { + adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->gfx.ras_if) + return -ENOMEM; + adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX; + adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->gfx.ras_if->sub_block_index = 0; + strcpy(adev->gfx.ras_if->name, "gfx"); + } + fs_info.head = ih_info.head = *adev->gfx.ras_if; + + r = amdgpu_ras_late_init(adev, adev->gfx.ras_if, + &fs_info, &ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) { + r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); + if (r) + goto late_fini; + } else { + /* free gfx ras_if if ras is not supported */ + r = 0; + goto free; + } + + return 0; +late_fini: + amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info); +free: + kfree(adev->gfx.ras_if); + adev->gfx.ras_if = NULL; + return r; +} + +void amdgpu_gfx_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) && + adev->gfx.ras_if) { + struct ras_common_if *ras_if = adev->gfx.ras_if; + struct ras_ih_if ih_info = { + .head = *ras_if, + .cb = amdgpu_gfx_process_ras_data_cb, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} + +int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry) +{ + /* TODO ue will trigger an interrupt. 
+ * + * When “Full RAS” is enabled, the per-IP interrupt sources should + * be disabled and the driver should only look for the aggregated + * interrupt via sync flood + */ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + if (adev->gfx.funcs->query_ras_error_count) + adev->gfx.funcs->query_ras_error_count(adev, err_data); + amdgpu_ras_reset_gpu(adev, 0); + } + return AMDGPU_RAS_SUCCESS; +} + +int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct ras_common_if *ras_if = adev->gfx.ras_if; + struct ras_dispatch_if ih_data = { + .entry = entry, + }; + + if (!ras_if) + return 0; + + ih_data.head = *ras_if; + + DRM_ERROR("CP ECC ERROR IRQ\n"); + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 6d19183b478b..0ae0a2715b0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -201,28 +201,6 @@ struct amdgpu_gfx_funcs { int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status); }; -struct amdgpu_ngg_buf { - struct amdgpu_bo *bo; - uint64_t gpu_addr; - uint32_t size; - uint32_t bo_size; -}; - -enum { - NGG_PRIM = 0, - NGG_POS, - NGG_CNTL, - NGG_PARAM, - NGG_BUF_MAX -}; - -struct amdgpu_ngg { - struct amdgpu_ngg_buf buf[NGG_BUF_MAX]; - uint32_t gds_reserve_addr; - uint32_t gds_reserve_size; - bool init; -}; - struct sq_work { struct work_struct work; unsigned ih_data; @@ -247,7 +225,7 @@ struct amdgpu_me { uint32_t num_me; uint32_t num_pipe_per_me; uint32_t num_queue_per_pipe; - void *mqd_backup[AMDGPU_MAX_GFX_RINGS + 1]; + void *mqd_backup[AMDGPU_MAX_GFX_RINGS]; /* These are the resources for which amdgpu takes ownership */ DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES); @@ -312,9 +290,6 @@ struct amdgpu_gfx { uint32_t grbm_soft_reset; uint32_t srbm_soft_reset; - /* NGG */ - struct amdgpu_ngg ngg; - /* gfx off */ bool gfx_off_state; /* true: enabled, false: disabled */ struct mutex gfx_off_mutex; @@ -356,8 +331,7 @@ int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, struct amdgpu_ring *ring, struct amdgpu_irq_src *irq); -void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring, - struct amdgpu_irq_src *irq); +void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring); void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev); int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, @@ -385,5 +359,12 @@ void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit, bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); - +int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev); +void amdgpu_gfx_ras_fini(struct amdgpu_device *adev); +int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry); +int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 5790db61fa2c..a12f33c0f5df 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -27,6 +27,8 @@ #include <linux/io-64-nonatomic-lo-hi.h> #include "amdgpu.h" +#include "amdgpu_ras.h" +#include "amdgpu_xgmi.h" /** * amdgpu_gmc_get_pde_for_bo - get the 
PDE for a BO @@ -305,3 +307,29 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, gmc->fault_hash[hash].idx = gmc->last_fault++; return false; } + +int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) +{ + int r; + + if (adev->umc.funcs && adev->umc.funcs->ras_late_init) { + r = adev->umc.funcs->ras_late_init(adev); + if (r) + return r; + } + + if (adev->mmhub.funcs && adev->mmhub.funcs->ras_late_init) { + r = adev->mmhub.funcs->ras_late_init(adev); + if (r) + return r; + } + + return amdgpu_xgmi_ras_late_init(adev); +} + +void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) +{ + amdgpu_umc_ras_fini(adev); + amdgpu_mmhub_ras_fini(adev); + amdgpu_xgmi_ras_fini(adev); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index b6e1d98ef01e..b499a3de8bb6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -77,6 +77,7 @@ struct amdgpu_gmc_fault { struct amdgpu_vmhub { uint32_t ctx0_ptb_addr_lo32; uint32_t ctx0_ptb_addr_hi32; + uint32_t vm_inv_eng0_sem; uint32_t vm_inv_eng0_req; uint32_t vm_inv_eng0_ack; uint32_t vm_context0_cntl; @@ -99,12 +100,15 @@ struct amdgpu_gmc_funcs { unsigned pasid); /* enable/disable PRT support */ void (*set_prt)(struct amdgpu_device *adev, bool enable); - /* set pte flags based per asic */ - uint64_t (*get_vm_pte_flags)(struct amdgpu_device *adev, - uint32_t flags); + /* map mtype to hardware flags */ + uint64_t (*map_mtype)(struct amdgpu_device *adev, uint32_t flags); /* get the pde for a given mc addr */ void (*get_vm_pde)(struct amdgpu_device *adev, int level, u64 *dst, u64 *flags); + /* get the pte flags to use for a BO VA mapping */ + void (*get_vm_pte)(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags); }; struct amdgpu_xgmi { @@ -120,21 +124,52 @@ struct amdgpu_xgmi { /* gpu list in the same hive */ struct list_head head; bool supported; + struct ras_common_if *ras_if; }; struct amdgpu_gmc { + /* FB's physical address in MMIO space (for CPU to + * map FB). This is different compared to the agp/ + * gart/vram_start/end field as the later is from + * GPU's view and aper_base is from CPU's view. + */ resource_size_t aper_size; resource_size_t aper_base; /* for some chips with <= 32MB we need to lie * about vram size near mc fb location */ u64 mc_vram_size; u64 visible_vram_size; + /* AGP aperture start and end in MC address space + * Driver find a hole in the MC address space + * to place AGP by setting MC_VM_AGP_BOT/TOP registers + * Under VMID0, logical address == MC address. AGP + * aperture maps to physical bus or IOVA addressed. + * AGP aperture is used to simulate FB in ZFB case. + * AGP aperture is also used for page table in system + * memory (mainly for APU). + * + */ u64 agp_size; u64 agp_start; u64 agp_end; + /* GART aperture start and end in MC address space + * Driver find a hole in the MC address space + * to place GART by setting VM_CONTEXT0_PAGE_TABLE_START/END_ADDR + * registers + * Under VMID0, logical address inside GART aperture will + * be translated through gpuvm gart page table to access + * paged system memory + */ u64 gart_size; u64 gart_start; u64 gart_end; + /* Frame buffer aperture of this GPU device. Different from + * fb_start (see below), this only covers the local GPU device. + * Driver get fb_start from MC_VM_FB_LOCATION_BASE (set by vbios) + * and calculate vram_start of this local device by adding an + * offset inside the XGMI hive. 
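The comment introduced above, which continues just below, spells out how each device's VRAM window is placed in the MC address space. As a rough illustration only (the exact sequence lives in the per-ASIC gmc code; the field names follow struct amdgpu_gmc and struct amdgpu_xgmi as used elsewhere in this series), the placement described there amounts to:

    /* fb_start comes from MC_VM_FB_LOCATION_BASE, programmed by the vbios */
    u64 base = adev->gmc.fb_start;

    /* each GPU in an XGMI hive owns one segment of the shared FB region */
    base += adev->gmc.xgmi.physical_node_id *
            adev->gmc.xgmi.node_segment_size;

    adev->gmc.vram_start = base;
    adev->gmc.vram_end   = base + adev->gmc.mc_vram_size - 1;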
+ * Under VMID0, logical address == MC address + */ u64 vram_start; u64 vram_end; /* FB region , it's same as local vram region in single GPU, in XGMI @@ -153,6 +188,7 @@ struct amdgpu_gmc { uint32_t fw_version; struct amdgpu_irq_src vm_fault; uint32_t vram_type; + uint8_t vram_vendor; uint32_t srbm_soft_reset; bool prt_warning; uint64_t stolen_size; @@ -177,15 +213,14 @@ struct amdgpu_gmc { struct amdgpu_xgmi xgmi; struct amdgpu_irq_src ecc_irq; - struct ras_common_if *umc_ras_if; - struct ras_common_if *mmhub_ras_if; }; #define amdgpu_gmc_flush_gpu_tlb(adev, vmid, vmhub, type) ((adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid), (vmhub), (type))) #define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr)) #define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid)) +#define amdgpu_gmc_map_mtype(adev, flags) (adev)->gmc.gmc_funcs->map_mtype((adev),(flags)) #define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags)) -#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev),(flags)) +#define amdgpu_gmc_get_vm_pte(adev, mapping, flags) (adev)->gmc.gmc_funcs->get_vm_pte((adev), (mapping), (flags)) /** * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR @@ -230,5 +265,7 @@ void amdgpu_gmc_agp_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc); bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint64_t addr, uint16_t pasid, uint64_t timestamp); +int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev); +void amdgpu_gmc_ras_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c index 53734da1c2df..6f9289735e31 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c @@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm, !dma_fence_is_later(updates, (*id)->flushed_updates)) updates = NULL; - if ((*id)->owner != vm->entity.fence_context || + if ((*id)->owner != vm->direct.fence_context || job->vm_pd_addr != (*id)->pd_gpu_addr || updates || !(*id)->last_flush || ((*id)->last_flush->context != fence_context && @@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm, struct dma_fence *flushed; /* Check all the prerequisites to using this VMID */ - if ((*id)->owner != vm->entity.fence_context) + if ((*id)->owner != vm->direct.fence_context) continue; if ((*id)->pd_gpu_addr != job->vm_pd_addr) @@ -449,7 +449,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring, } id->pd_gpu_addr = job->vm_pd_addr; - id->owner = vm->entity.fence_context; + id->owner = vm->direct.fence_context; if (job->vm_needs_flush) { dma_fence_put(id->last_flush); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 2a3f5ec298db..30d540d23b77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -87,10 +87,13 @@ static void amdgpu_hotplug_work_func(struct work_struct *work) struct drm_device *dev = adev->ddev; struct drm_mode_config *mode_config = &dev->mode_config; struct drm_connector *connector; + struct drm_connector_list_iter iter; mutex_lock(&mode_config->mutex); - list_for_each_entry(connector, &mode_config->connector_list, head) + drm_connector_list_iter_begin(dev, &iter); + 
drm_for_each_connector_iter(connector, &iter) amdgpu_connector_hotplug(connector); + drm_connector_list_iter_end(&iter); mutex_unlock(&mode_config->mutex); /* Just fire off a uevent and let userspace tell us what to do */ drm_helper_hpd_irq_event(dev); @@ -153,6 +156,20 @@ irqreturn_t amdgpu_irq_handler(int irq, void *arg) ret = amdgpu_ih_process(adev, &adev->irq.ih); if (ret == IRQ_HANDLED) pm_runtime_mark_last_busy(dev->dev); + + /* For the hardware that cannot enable bif ring for both ras_controller_irq + * and ras_err_evnet_athub_irq ih cookies, the driver has to poll status + * register to check whether the interrupt is triggered or not, and properly + * ack the interrupt if it is there + */ + if (adev->nbio.funcs && + adev->nbio.funcs->handle_ras_controller_intr_no_bifring) + adev->nbio.funcs->handle_ras_controller_intr_no_bifring(adev); + + if (adev->nbio.funcs && + adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring) + adev->nbio.funcs->handle_ras_err_event_athub_intr_no_bifring(adev); + return ret; } @@ -228,10 +245,19 @@ int amdgpu_irq_init(struct amdgpu_device *adev) adev->irq.msi_enabled = false; if (amdgpu_msi_ok(adev)) { - int ret = pci_enable_msi(adev->pdev); - if (!ret) { + int nvec = pci_msix_vec_count(adev->pdev); + unsigned int flags; + + if (nvec <= 0) { + flags = PCI_IRQ_MSI; + } else { + flags = PCI_IRQ_MSI | PCI_IRQ_MSIX; + } + /* we only need one vector */ + nvec = pci_alloc_irq_vectors(adev->pdev, 1, 1, flags); + if (nvec > 0) { adev->irq.msi_enabled = true; - dev_dbg(adev->dev, "amdgpu: using MSI.\n"); + dev_dbg(adev->dev, "amdgpu: using MSI/MSI-X.\n"); } } @@ -254,7 +280,8 @@ int amdgpu_irq_init(struct amdgpu_device *adev) INIT_WORK(&adev->irq.ih2_work, amdgpu_irq_handle_ih2); adev->irq.installed = true; - r = drm_irq_install(adev->ddev, adev->ddev->pdev->irq); + /* Use vector 0 for MSI-X */ + r = drm_irq_install(adev->ddev, pci_irq_vector(adev->pdev, 0)); if (r) { adev->irq.installed = false; if (!amdgpu_device_has_dc_support(adev)) @@ -284,7 +311,7 @@ void amdgpu_irq_fini(struct amdgpu_device *adev) drm_irq_uninstall(adev->ddev); adev->irq.installed = false; if (adev->irq.msi_enabled) - pci_disable_msi(adev->pdev); + pci_free_irq_vectors(adev->pdev); if (!amdgpu_device_has_dc_support(adev)) flush_work(&adev->hotplug_work); } @@ -369,7 +396,7 @@ int amdgpu_irq_add_id(struct amdgpu_device *adev, * amdgpu_irq_dispatch - dispatch IRQ to IP blocks * * @adev: amdgpu device pointer - * @entry: interrupt vector pointer + * @ih: interrupt ring instance * * Dispatches IRQ to IP blocks. 
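The interrupt-setup hunk above drops pci_enable_msi() in favour of the unified pci_alloc_irq_vectors() API, so a single vector works whether the device exposes MSI or MSI-X, and the Linux IRQ number is then looked up with pci_irq_vector(). A minimal, self-contained sketch of that pattern for a generic PCI driver (not amdgpu code; the example_* names are placeholders) looks like this:

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    static int example_setup_irq(struct pci_dev *pdev, irq_handler_t handler,
                                 void *ctx)
    {
            unsigned int flags = PCI_IRQ_MSI | PCI_IRQ_MSIX;
            int nvec;

            /* devices without an MSI-X table fall back to plain MSI */
            if (pci_msix_vec_count(pdev) <= 0)
                    flags = PCI_IRQ_MSI;

            /* request exactly one vector */
            nvec = pci_alloc_irq_vectors(pdev, 1, 1, flags);
            if (nvec < 0)
                    return nvec;

            /* vector index 0 is valid for both MSI and MSI-X */
            return request_irq(pci_irq_vector(pdev, 0), handler, 0,
                               "example", ctx);
    }

    static void example_teardown_irq(struct pci_dev *pdev, void *ctx)
    {
            free_irq(pci_irq_vector(pdev, 0), ctx);
            pci_free_irq_vectors(pdev);
    }

The matching teardown in the hunk above is the switch from pci_disable_msi() to pci_free_irq_vectors(), which handles both interrupt types.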
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 96b2a31ccfed..4fb20e870e63 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -248,6 +248,44 @@ static struct dma_fence *amdgpu_job_run(struct drm_sched_job *sched_job) return fence; } +#define to_drm_sched_job(sched_job) \ + container_of((sched_job), struct drm_sched_job, queue_node) + +void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched) +{ + struct drm_sched_job *s_job; + struct drm_sched_entity *s_entity = NULL; + int i; + + /* Signal all jobs not yet scheduled */ + for (i = DRM_SCHED_PRIORITY_MAX - 1; i >= DRM_SCHED_PRIORITY_MIN; i--) { + struct drm_sched_rq *rq = &sched->sched_rq[i]; + + if (!rq) + continue; + + spin_lock(&rq->lock); + list_for_each_entry(s_entity, &rq->entities, list) { + while ((s_job = to_drm_sched_job(spsc_queue_pop(&s_entity->job_queue)))) { + struct drm_sched_fence *s_fence = s_job->s_fence; + + dma_fence_signal(&s_fence->scheduled); + dma_fence_set_error(&s_fence->finished, -EHWPOISON); + dma_fence_signal(&s_fence->finished); + } + } + spin_unlock(&rq->lock); + } + + /* Signal all jobs already scheduled to HW */ + list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + struct drm_sched_fence *s_fence = s_job->s_fence; + + dma_fence_set_error(&s_fence->finished, -EHWPOISON); + dma_fence_signal(&s_fence->finished); + } +} + const struct drm_sched_backend_ops amdgpu_sched_ops = { .dependency = amdgpu_job_dependency, .run_job = amdgpu_job_run, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 51e62504c279..dc7ee9358dcd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -76,4 +76,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, void *owner, struct dma_fence **f); int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, struct dma_fence **fence); + +void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index a73206784cba..b6db28a570c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -583,9 +583,12 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file struct drm_amdgpu_info_vram_gtt vram_gtt; vram_gtt.vram_size = adev->gmc.real_vram_size - - atomic64_read(&adev->vram_pin_size); - vram_gtt.vram_cpu_accessible_size = adev->gmc.visible_vram_size - - atomic64_read(&adev->visible_pin_size); + atomic64_read(&adev->vram_pin_size) - + AMDGPU_VM_RESERVED_VRAM; + vram_gtt.vram_cpu_accessible_size = + min(adev->gmc.visible_vram_size - + atomic64_read(&adev->visible_pin_size), + vram_gtt.vram_size); vram_gtt.gtt_size = adev->mman.bdev.man[TTM_PL_TT].size; vram_gtt.gtt_size *= PAGE_SIZE; vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size); @@ -598,15 +601,18 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file memset(&mem, 0, sizeof(mem)); mem.vram.total_heap_size = adev->gmc.real_vram_size; mem.vram.usable_heap_size = adev->gmc.real_vram_size - - atomic64_read(&adev->vram_pin_size); + atomic64_read(&adev->vram_pin_size) - + AMDGPU_VM_RESERVED_VRAM; mem.vram.heap_usage = amdgpu_vram_mgr_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); mem.vram.max_allocation = mem.vram.usable_heap_size * 3 / 4; 
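The two hunks above adjust how amdgpu_info_ioctl() reports VRAM: the fixed reservation AMDGPU_VM_RESERVED_VRAM is subtracted from the usable heap, and the CPU-visible figure is clamped so it can never exceed the overall usable size (the same clamp is applied to the cpu_accessible_vram heap just below). Schematically, as a sketch with local variable names invented for clarity:

    u64 usable_vram  = adev->gmc.real_vram_size -
                       atomic64_read(&adev->vram_pin_size) -
                       AMDGPU_VM_RESERVED_VRAM;

    /* visible VRAM minus pinned-visible, but never more than the usable total */
    u64 visible_vram = min(adev->gmc.visible_vram_size -
                           atomic64_read(&adev->visible_pin_size),
                           usable_vram);

Previously a large pinned allocation could leave the reported CPU-accessible size larger than the reported usable size; the min() removes that inconsistency.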
mem.cpu_accessible_vram.total_heap_size = adev->gmc.visible_vram_size; - mem.cpu_accessible_vram.usable_heap_size = adev->gmc.visible_vram_size - - atomic64_read(&adev->visible_pin_size); + mem.cpu_accessible_vram.usable_heap_size = + min(adev->gmc.visible_vram_size - + atomic64_read(&adev->visible_pin_size), + mem.vram.usable_heap_size); mem.cpu_accessible_vram.heap_usage = amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM]); mem.cpu_accessible_vram.max_allocation = @@ -732,17 +738,6 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file dev_info.vce_harvest_config = adev->vce.harvest_config; dev_info.gc_double_offchip_lds_buf = adev->gfx.config.double_offchip_lds_buf; - - if (amdgpu_ngg) { - dev_info.prim_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PRIM].gpu_addr; - dev_info.prim_buf_size = adev->gfx.ngg.buf[NGG_PRIM].size; - dev_info.pos_buf_gpu_addr = adev->gfx.ngg.buf[NGG_POS].gpu_addr; - dev_info.pos_buf_size = adev->gfx.ngg.buf[NGG_POS].size; - dev_info.cntl_sb_buf_gpu_addr = adev->gfx.ngg.buf[NGG_CNTL].gpu_addr; - dev_info.cntl_sb_buf_size = adev->gfx.ngg.buf[NGG_CNTL].size; - dev_info.param_buf_gpu_addr = adev->gfx.ngg.buf[NGG_PARAM].gpu_addr; - dev_info.param_buf_size = adev->gfx.ngg.buf[NGG_PARAM].size; - } dev_info.wave_front_size = adev->gfx.cu_info.wave_front_size; dev_info.num_shader_visible_vgprs = adev->gfx.config.max_gprs; dev_info.num_cu_per_sh = adev->gfx.config.max_cu_per_sh; @@ -971,6 +966,12 @@ int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) /* Ensure IB tests are run on ring */ flush_delayed_work(&adev->delayed_init_work); + + if (amdgpu_ras_intr_triggered()) { + DRM_ERROR("RAS Intr triggered, device disabled!!"); + return -EHWPOISON; + } + file_priv->driver_priv = NULL; r = pm_runtime_get_sync(dev->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c new file mode 100644 index 000000000000..676c48c02d77 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c @@ -0,0 +1,70 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "amdgpu.h" +#include "amdgpu_ras.h" + +int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + struct ras_fs_if fs_info = { + .sysfs_name = "mmhub_err_count", + .debugfs_name = "mmhub_err_inject", + }; + + if (!adev->mmhub.ras_if) { + adev->mmhub.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->mmhub.ras_if) + return -ENOMEM; + adev->mmhub.ras_if->block = AMDGPU_RAS_BLOCK__MMHUB; + adev->mmhub.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->mmhub.ras_if->sub_block_index = 0; + strcpy(adev->mmhub.ras_if->name, "mmhub"); + } + ih_info.head = fs_info.head = *adev->mmhub.ras_if; + r = amdgpu_ras_late_init(adev, adev->mmhub.ras_if, + &fs_info, &ih_info); + if (r || !amdgpu_ras_is_supported(adev, adev->mmhub.ras_if->block)) { + kfree(adev->mmhub.ras_if); + adev->mmhub.ras_if = NULL; + } + + return r; +} + +void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) && + adev->mmhub.ras_if) { + struct ras_common_if *ras_if = adev->mmhub.ras_if; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index 2d75ecfa199b..1cd78940cf82 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -23,9 +23,17 @@ struct amdgpu_mmhub_funcs { void (*ras_init)(struct amdgpu_device *adev); + int (*ras_late_init)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); }; +struct amdgpu_mmhub { + struct ras_common_if *ras_if; + const struct amdgpu_mmhub_funcs *funcs; +}; + +int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev); +void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c index 31d4deb5d294..392300f77b13 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c @@ -136,6 +136,7 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn) * amdgpu_mn_read_lock - take the read side lock for this notifier * * @amn: our notifier + * @blockable: is the notifier blockable */ static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c new file mode 100644 index 000000000000..7d5c3a9de9ea --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included + * in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN + * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "amdgpu.h" +#include "amdgpu_ras.h" + +int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + struct ras_fs_if fs_info = { + .sysfs_name = "pcie_bif_err_count", + .debugfs_name = "pcie_bif_err_inject", + }; + + if (!adev->nbio.ras_if) { + adev->nbio.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->nbio.ras_if) + return -ENOMEM; + adev->nbio.ras_if->block = AMDGPU_RAS_BLOCK__PCIE_BIF; + adev->nbio.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->nbio.ras_if->sub_block_index = 0; + strcpy(adev->nbio.ras_if->name, "pcie_bif"); + } + ih_info.head = fs_info.head = *adev->nbio.ras_if; + r = amdgpu_ras_late_init(adev, adev->nbio.ras_if, + &fs_info, &ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { + r = amdgpu_irq_get(adev, &adev->nbio.ras_controller_irq, 0); + if (r) + goto late_fini; + r = amdgpu_irq_get(adev, &adev->nbio.ras_err_event_athub_irq, 0); + if (r) + goto late_fini; + } else { + r = 0; + goto free; + } + + return 0; +late_fini: + amdgpu_ras_late_fini(adev, adev->nbio.ras_if, &ih_info); +free: + kfree(adev->nbio.ras_if); + adev->nbio.ras_if = NULL; + return r; +} + +void amdgpu_nbio_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF) && + adev->nbio.ras_if) { + struct ras_common_if *ras_if = adev->nbio.ras_if; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h new file mode 100644 index 000000000000..919bd566ba3c --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -0,0 +1,101 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
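struct amdgpu_nbio_funcs, defined in the new header just below, grows RAS-specific callbacks, and the generic amdgpu_nbio_ras_late_init()/amdgpu_nbio_ras_fini() helpers above are intended to be plugged into that table by the ASIC-specific nbio code. A hedged sketch of such wiring (the example_* names are placeholders, not functions from this patch):

    static const struct amdgpu_nbio_funcs example_nbio_funcs = {
            /* ... doorbell, hdp-flush and clockgating callbacks ... */
            .init_ras_controller_interrupt      = example_init_ras_controller_irq,
            .init_ras_err_event_athub_interrupt = example_init_ras_athub_irq,
            .query_ras_error_count              = example_query_ras_error_count,
            .ras_late_init                      = amdgpu_nbio_ras_late_init,
    };

With .ras_late_init pointing at the common helper, the block gets the shared sysfs/debugfs nodes and the two BIF RAS interrupt sources enabled without duplicating that boilerplate per ASIC.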
+ * + */ +#ifndef __AMDGPU_NBIO_H__ +#define __AMDGPU_NBIO_H__ + +/* + * amdgpu nbio functions + */ +struct nbio_hdp_flush_reg { + u32 ref_and_mask_cp0; + u32 ref_and_mask_cp1; + u32 ref_and_mask_cp2; + u32 ref_and_mask_cp3; + u32 ref_and_mask_cp4; + u32 ref_and_mask_cp5; + u32 ref_and_mask_cp6; + u32 ref_and_mask_cp7; + u32 ref_and_mask_cp8; + u32 ref_and_mask_cp9; + u32 ref_and_mask_sdma0; + u32 ref_and_mask_sdma1; + u32 ref_and_mask_sdma2; + u32 ref_and_mask_sdma3; + u32 ref_and_mask_sdma4; + u32 ref_and_mask_sdma5; + u32 ref_and_mask_sdma6; + u32 ref_and_mask_sdma7; +}; + +struct amdgpu_nbio_funcs { + const struct nbio_hdp_flush_reg *hdp_flush_reg; + u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev); + u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev); + u32 (*get_pcie_index_offset)(struct amdgpu_device *adev); + u32 (*get_pcie_data_offset)(struct amdgpu_device *adev); + u32 (*get_rev_id)(struct amdgpu_device *adev); + void (*mc_access_enable)(struct amdgpu_device *adev, bool enable); + void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring); + u32 (*get_memsize)(struct amdgpu_device *adev); + void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance, + bool use_doorbell, int doorbell_index, int doorbell_size); + void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell, + int doorbell_index, int instance); + void (*enable_doorbell_aperture)(struct amdgpu_device *adev, + bool enable); + void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev, + bool enable); + void (*ih_doorbell_range)(struct amdgpu_device *adev, + bool use_doorbell, int doorbell_index); + void (*enable_doorbell_interrupt)(struct amdgpu_device *adev, + bool enable); + void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev, + bool enable); + void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev, + bool enable); + void (*get_clockgating_state)(struct amdgpu_device *adev, + u32 *flags); + void (*ih_control)(struct amdgpu_device *adev); + void (*init_registers)(struct amdgpu_device *adev); + void (*detect_hw_virt)(struct amdgpu_device *adev); + void (*remap_hdp_registers)(struct amdgpu_device *adev); + void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev); + void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev); + int (*init_ras_controller_interrupt)(struct amdgpu_device *adev); + int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev); + void (*query_ras_error_count)(struct amdgpu_device *adev, + void *ras_error_status); + int (*ras_late_init)(struct amdgpu_device *adev); +}; + +struct amdgpu_nbio { + const struct nbio_hdp_flush_reg *hdp_flush_reg; + struct amdgpu_irq_src ras_controller_irq; + struct amdgpu_irq_src ras_err_event_athub_irq; + struct ras_common_if *ras_if; + const struct amdgpu_nbio_funcs *funcs; +}; + +int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev); +void amdgpu_nbio_ras_fini(struct amdgpu_device *adev); +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 7289e1b4fb60..e3f16b49e970 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -343,6 +343,70 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, } /** + * amdgpu_bo_create_kernel_at - create BO for kernel use at specific location + * + * @adev: amdgpu device object + * @offset: offset of the BO + * @size: size of the BO + * @domain: where to place it + * 
@bo_ptr: used to initialize BOs in structures + * @cpu_addr: optional CPU address mapping + * + * Creates a kernel BO at a specific offset in the address space of the domain. + * + * Returns: + * 0 on success, negative error code otherwise. + */ +int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, + uint64_t offset, uint64_t size, uint32_t domain, + struct amdgpu_bo **bo_ptr, void **cpu_addr) +{ + struct ttm_operation_ctx ctx = { false, false }; + unsigned int i; + int r; + + offset &= PAGE_MASK; + size = ALIGN(size, PAGE_SIZE); + + r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr, + NULL, cpu_addr); + if (r) + return r; + + /* + * Remove the original mem node and create a new one at the request + * position. + */ + if (cpu_addr) + amdgpu_bo_kunmap(*bo_ptr); + + ttm_bo_mem_put(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem); + + for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) { + (*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT; + (*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; + } + r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement, + &(*bo_ptr)->tbo.mem, &ctx); + if (r) + goto error; + + if (cpu_addr) { + r = amdgpu_bo_kmap(*bo_ptr, cpu_addr); + if (r) + goto error; + } + + amdgpu_bo_unreserve(*bo_ptr); + return 0; + +error: + amdgpu_bo_unreserve(*bo_ptr); + amdgpu_bo_unref(bo_ptr); + return r; +} + +/** * amdgpu_bo_free_kernel - free BO for kernel use * * @bo: amdgpu BO to free @@ -451,7 +515,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev, { struct ttm_operation_ctx ctx = { .interruptible = (bp->type != ttm_bo_type_kernel), - .no_wait_gpu = false, + .no_wait_gpu = bp->no_wait_gpu, .resv = bp->resv, .flags = bp->type != ttm_bo_type_kernel ? TTM_OPT_FLAG_ALLOW_RES_EVICT : 0 @@ -1059,7 +1123,10 @@ void amdgpu_bo_fini(struct amdgpu_device *adev) int amdgpu_bo_fbdev_mmap(struct amdgpu_bo *bo, struct vm_area_struct *vma) { - return ttm_fbdev_mmap(vma, &bo->tbo); + if (vma->vm_pgoff != 0) + return -EACCES; + + return ttm_bo_mmap_obj(vma, &bo->tbo); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 658f4c9779b7..7e99f6c58c48 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -41,6 +41,7 @@ struct amdgpu_bo_param { u32 preferred_domain; u64 flags; enum ttm_bo_type type; + bool no_wait_gpu; struct dma_resv *resv; }; @@ -237,6 +238,9 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, unsigned long size, int align, u32 domain, struct amdgpu_bo **bo_ptr, u64 *gpu_addr, void **cpu_addr); +int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, + uint64_t offset, uint64_t size, uint32_t domain, + struct amdgpu_bo **bo_ptr, void **cpu_addr); void amdgpu_bo_free_kernel(struct amdgpu_bo **bo, u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 03930313c263..f205f56e3358 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -161,7 +161,7 @@ static ssize_t amdgpu_get_dpm_state(struct device *dev, if (is_support_sw_smu(adev)) { if (adev->smu.ppt_funcs->get_current_power_state) - pm = amdgpu_smu_get_current_power_state(adev); + pm = smu_get_current_power_state(&adev->smu); else pm = adev->pm.dpm.user_state; } else if (adev->powerplay.pp_funcs->get_current_power_state) { @@ -805,8 +805,7 @@ static ssize_t 
amdgpu_get_pp_feature_status(struct device *dev, } /** - * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk - * pp_dpm_pcie + * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie * * The amdgpu driver provides a sysfs API for adjusting what power levels * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, @@ -822,9 +821,15 @@ static ssize_t amdgpu_get_pp_feature_status(struct device *dev, * * To manually adjust these states, first select manual using * power_dpm_force_performance_level. - * Secondly,Enter a new value for each level by inputing a string that + * Secondly, enter a new value for each level by inputing a string that * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie" - * E.g., echo 4 5 6 to > pp_dpm_sclk will enable sclk levels 4, 5, and 6. + * E.g., + * + * .. code-block:: bash + * + * echo "4 5 6" > pp_dpm_sclk + * + * will enable sclk levels 4, 5, and 6. * * NOTE: change to the dcefclk max dpm level is not supported now */ @@ -902,7 +907,7 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); @@ -949,7 +954,7 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); @@ -989,7 +994,7 @@ static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); @@ -1029,7 +1034,7 @@ static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); @@ -1069,7 +1074,7 @@ static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); @@ -1109,7 +1114,7 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, return ret; if (is_support_sw_smu(adev)) - ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask); + ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true); else if (adev->powerplay.pp_funcs->force_clock_level) ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); @@ -1301,7 +1306,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, } parameter[parameter_size] = profile_mode; if (is_support_sw_smu(adev)) - ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size); + ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true); else if (adev->powerplay.pp_funcs->set_power_profile_mode) ret = 
amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); if (!ret) @@ -2010,7 +2015,7 @@ static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, uint32_t limit = 0; if (is_support_sw_smu(adev)) { - smu_get_power_limit(&adev->smu, &limit, true); + smu_get_power_limit(&adev->smu, &limit, true, true); return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); @@ -2028,7 +2033,7 @@ static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, uint32_t limit = 0; if (is_support_sw_smu(adev)) { - smu_get_power_limit(&adev->smu, &limit, false); + smu_get_power_limit(&adev->smu, &limit, false, true); return snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); @@ -2196,9 +2201,9 @@ static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, * * - fan1_input: fan speed in RPM * - * - fan[1-*]_target: Desired fan speed Unit: revolution/min (RPM) + * - fan[1-\*]_target: Desired fan speed Unit: revolution/min (RPM) * - * - fan[1-*]_enable: Enable or disable the sensors.1: Enable 0: Disable + * - fan[1-\*]_enable: Enable or disable the sensors.1: Enable 0: Disable * * hwmon interfaces for GPU clocks: * @@ -2825,6 +2830,19 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) DRM_ERROR("failed to create device file pp_dpm_sclk\n"); return ret; } + + /* Arcturus does not support standalone mclk/socclk/fclk level setting */ + if (adev->asic_type == CHIP_ARCTURUS) { + dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO; + dev_attr_pp_dpm_mclk.store = NULL; + + dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO; + dev_attr_pp_dpm_socclk.store = NULL; + + dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO; + dev_attr_pp_dpm_fclk.store = NULL; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); if (ret) { DRM_ERROR("failed to create device file pp_dpm_mclk\n"); @@ -3008,7 +3026,8 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm; smu_handle_task(&adev->smu, smu_dpm->dpm_level, - AMD_PP_TASK_DISPLAY_CONFIG_CHANGE); + AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, + true); } else { if (adev->powerplay.pp_funcs->dispatch_tasks) { if (!amdgpu_device_has_dc_support(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index a46090071034..2770cba56a6b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -34,6 +34,8 @@ #include "psp_v11_0.h" #include "psp_v12_0.h" +#include "amdgpu_ras.h" + static void psp_set_funcs(struct amdgpu_device *adev); static int psp_early_init(void *handle) @@ -88,6 +90,17 @@ static int psp_sw_init(void *handle) return ret; } + ret = psp_mem_training_init(psp); + if (ret) { + DRM_ERROR("Failed to initialize memory training!\n"); + return ret; + } + ret = psp_mem_training(psp, PSP_MEM_TRAIN_COLD_BOOT); + if (ret) { + DRM_ERROR("Failed to process memory training!\n"); + return ret; + } + return 0; } @@ -95,6 +108,7 @@ static int psp_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + psp_mem_training_fini(&adev->psp); release_firmware(adev->psp.sos_fw); adev->psp.sos_fw = NULL; release_firmware(adev->psp.asd_fw); @@ -151,10 +165,19 @@ psp_cmd_submit_buf(struct 
psp_context *psp, return ret; } + amdgpu_asic_invalidate_hdp(psp->adev, NULL); while (*((unsigned int *)psp->fence_buf) != index) { if (--timeout == 0) break; + /* + * Shouldn't wait for timeout when err_event_athub occurs, + * because gpu reset thread triggered and lock resource should + * be released for psp resume sequence. + */ + if (amdgpu_ras_intr_triggered()) + break; msleep(1); + amdgpu_asic_invalidate_hdp(psp->adev, NULL); } /* In some cases, psp response status is not 0 even there is no @@ -168,8 +191,9 @@ psp_cmd_submit_buf(struct psp_context *psp, if (ucode) DRM_WARN("failed to load ucode id (%d) ", ucode->ucode_id); - DRM_WARN("psp command failed and response status is (0x%X)\n", - psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK); + DRM_DEBUG_DRIVER("psp command (0x%X) failed and response status is (0x%X)\n", + psp->cmd_buf_mem->cmd_id, + psp->cmd_buf_mem->resp.status & GFX_CMD_STATUS_MASK); if (!timeout) { mutex_unlock(&psp->mutex); return -EINVAL; @@ -253,7 +277,8 @@ static int psp_tmr_init(struct psp_context *psp) /* For ASICs support RLC autoload, psp will parse the toc * and calculate the total size of TMR needed */ - if (psp->toc_start_addr && + if (!amdgpu_sriov_vf(psp->adev) && + psp->toc_start_addr && psp->toc_bin_size && psp->fw_pri_buf) { ret = psp_load_toc(psp, &tmr_size); @@ -287,15 +312,9 @@ static int psp_tmr_load(struct psp_context *psp) ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); - if (ret) - goto failed; kfree(cmd); - return 0; - -failed: - kfree(cmd); return ret; } @@ -548,7 +567,9 @@ static int psp_xgmi_initialize(struct psp_context *psp) struct ta_xgmi_shared_memory *xgmi_cmd; int ret; - if (!psp->adev->psp.ta_fw) + if (!psp->adev->psp.ta_fw || + !psp->adev->psp.ta_xgmi_ucode_size || + !psp->adev->psp.ta_xgmi_start_addr) return -ENOENT; if (!psp->xgmi_context.initialized) { @@ -737,6 +758,12 @@ static int psp_ras_terminate(struct psp_context *psp) { int ret; + /* + * TODO: bypass the terminate in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + if (!psp->ras.ras_initialized) return 0; @@ -758,6 +785,18 @@ static int psp_ras_initialize(struct psp_context *psp) { int ret; + /* + * TODO: bypass the initialize in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->adev->psp.ta_ras_ucode_size || + !psp->adev->psp.ta_ras_start_addr) { + dev_warn(psp->adev->dev, "RAS: ras ta ucode is not available\n"); + return 0; + } + if (!psp->ras.ras_initialized) { ret = psp_ras_init_shared_buf(psp); if (ret) @@ -772,6 +811,360 @@ static int psp_ras_initialize(struct psp_context *psp) } // ras end +// HDCP start +static void psp_prep_hdcp_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint64_t hdcp_ta_mc, + uint64_t hdcp_mc_shared, + uint32_t hdcp_ta_size, + uint32_t shared_size) +{ + cmd->cmd_id = GFX_CMD_ID_LOAD_TA; + cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(hdcp_ta_mc); + cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(hdcp_ta_mc); + cmd->cmd.cmd_load_ta.app_len = hdcp_ta_size; + + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = + lower_32_bits(hdcp_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = + upper_32_bits(hdcp_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; +} + +static int psp_hdcp_init_shared_buf(struct psp_context *psp) +{ + int ret; + + /* + * Allocate 16k memory aligned to 4k from Frame Buffer (local + * physical) for hdcp ta <-> Driver + */ + ret = amdgpu_bo_create_kernel(psp->adev, PSP_HDCP_SHARED_MEM_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + 
&psp->hdcp_context.hdcp_shared_bo, + &psp->hdcp_context.hdcp_shared_mc_addr, + &psp->hdcp_context.hdcp_shared_buf); + + return ret; +} + +static int psp_hdcp_load(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + memcpy(psp->fw_pri_buf, psp->ta_hdcp_start_addr, + psp->ta_hdcp_ucode_size); + + psp_prep_hdcp_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, + psp->hdcp_context.hdcp_shared_mc_addr, + psp->ta_hdcp_ucode_size, + PSP_HDCP_SHARED_MEM_SIZE); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + if (!ret) { + psp->hdcp_context.hdcp_initialized = 1; + psp->hdcp_context.session_id = cmd->resp.session_id; + } + + kfree(cmd); + + return ret; +} +static int psp_hdcp_initialize(struct psp_context *psp) +{ + int ret; + + /* + * TODO: bypass the initialize in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->adev->psp.ta_hdcp_ucode_size || + !psp->adev->psp.ta_hdcp_start_addr) { + dev_warn(psp->adev->dev, "HDCP: hdcp ta ucode is not available\n"); + return 0; + } + + if (!psp->hdcp_context.hdcp_initialized) { + ret = psp_hdcp_init_shared_buf(psp); + if (ret) + return ret; + } + + ret = psp_hdcp_load(psp); + if (ret) + return ret; + + return 0; +} +static void psp_prep_hdcp_ta_unload_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t hdcp_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_UNLOAD_TA; + cmd->cmd.cmd_unload_ta.session_id = hdcp_session_id; +} + +static int psp_hdcp_unload(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the unloading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_hdcp_ta_unload_cmd_buf(cmd, psp->hdcp_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + +static void psp_prep_hdcp_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t ta_cmd_id, + uint32_t hdcp_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; + cmd->cmd.cmd_invoke_cmd.session_id = hdcp_session_id; + cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; + /* Note: cmd_invoke_cmd.buf is not used for now */ +} + +int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_hdcp_ta_invoke_cmd_buf(cmd, ta_cmd_id, + psp->hdcp_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + +static int psp_hdcp_terminate(struct psp_context *psp) +{ + int ret; + + /* + * TODO: bypass the terminate in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->hdcp_context.hdcp_initialized) + return 0; + + ret = psp_hdcp_unload(psp); + if (ret) + return ret; + + psp->hdcp_context.hdcp_initialized = 0; + + /* free hdcp shared memory */ + amdgpu_bo_free_kernel(&psp->hdcp_context.hdcp_shared_bo, + &psp->hdcp_context.hdcp_shared_mc_addr, + &psp->hdcp_context.hdcp_shared_buf); + + return 0; +} +// HDCP end + +// DTM start +static void 
psp_prep_dtm_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint64_t dtm_ta_mc, + uint64_t dtm_mc_shared, + uint32_t dtm_ta_size, + uint32_t shared_size) +{ + cmd->cmd_id = GFX_CMD_ID_LOAD_TA; + cmd->cmd.cmd_load_ta.app_phy_addr_lo = lower_32_bits(dtm_ta_mc); + cmd->cmd.cmd_load_ta.app_phy_addr_hi = upper_32_bits(dtm_ta_mc); + cmd->cmd.cmd_load_ta.app_len = dtm_ta_size; + + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_lo = lower_32_bits(dtm_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_phy_addr_hi = upper_32_bits(dtm_mc_shared); + cmd->cmd.cmd_load_ta.cmd_buf_len = shared_size; +} + +static int psp_dtm_init_shared_buf(struct psp_context *psp) +{ + int ret; + + /* + * Allocate 16k memory aligned to 4k from Frame Buffer (local + * physical) for dtm ta <-> Driver + */ + ret = amdgpu_bo_create_kernel(psp->adev, PSP_DTM_SHARED_MEM_SIZE, + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, + &psp->dtm_context.dtm_shared_bo, + &psp->dtm_context.dtm_shared_mc_addr, + &psp->dtm_context.dtm_shared_buf); + + return ret; +} + +static int psp_dtm_load(struct psp_context *psp) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + memset(psp->fw_pri_buf, 0, PSP_1_MEG); + memcpy(psp->fw_pri_buf, psp->ta_dtm_start_addr, psp->ta_dtm_ucode_size); + + psp_prep_dtm_ta_load_cmd_buf(cmd, psp->fw_pri_mc_addr, + psp->dtm_context.dtm_shared_mc_addr, + psp->ta_dtm_ucode_size, + PSP_DTM_SHARED_MEM_SIZE); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + if (!ret) { + psp->dtm_context.dtm_initialized = 1; + psp->dtm_context.session_id = cmd->resp.session_id; + } + + kfree(cmd); + + return ret; +} + +static int psp_dtm_initialize(struct psp_context *psp) +{ + int ret; + + /* + * TODO: bypass the initialize in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->adev->psp.ta_dtm_ucode_size || + !psp->adev->psp.ta_dtm_start_addr) { + dev_warn(psp->adev->dev, "DTM: dtm ta ucode is not available\n"); + return 0; + } + + if (!psp->dtm_context.dtm_initialized) { + ret = psp_dtm_init_shared_buf(psp); + if (ret) + return ret; + } + + ret = psp_dtm_load(psp); + if (ret) + return ret; + + return 0; +} + +static void psp_prep_dtm_ta_invoke_cmd_buf(struct psp_gfx_cmd_resp *cmd, + uint32_t ta_cmd_id, + uint32_t dtm_session_id) +{ + cmd->cmd_id = GFX_CMD_ID_INVOKE_CMD; + cmd->cmd.cmd_invoke_cmd.session_id = dtm_session_id; + cmd->cmd.cmd_invoke_cmd.ta_cmd_id = ta_cmd_id; + /* Note: cmd_invoke_cmd.buf is not used for now */ +} + +int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id) +{ + int ret; + struct psp_gfx_cmd_resp *cmd; + + /* + * TODO: bypass the loading in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + psp_prep_dtm_ta_invoke_cmd_buf(cmd, ta_cmd_id, + psp->dtm_context.session_id); + + ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr); + + kfree(cmd); + + return ret; +} + +static int psp_dtm_terminate(struct psp_context *psp) +{ + int ret; + + /* + * TODO: bypass the terminate in sriov for now + */ + if (amdgpu_sriov_vf(psp->adev)) + return 0; + + if (!psp->dtm_context.dtm_initialized) + return 0; + + ret = psp_hdcp_unload(psp); + if (ret) + return ret; + + psp->dtm_context.dtm_initialized = 0; + + /* free hdcp shared memory */ + 
amdgpu_bo_free_kernel(&psp->dtm_context.dtm_shared_bo, + &psp->dtm_context.dtm_shared_mc_addr, + &psp->dtm_context.dtm_shared_buf); + + return 0; +} +// DTM end + static int psp_hw_start(struct psp_context *psp) { struct amdgpu_device *adev = psp->adev; @@ -845,6 +1238,16 @@ static int psp_hw_start(struct psp_context *psp) if (ret) dev_err(psp->adev->dev, "RAS: Failed to initialize RAS\n"); + + ret = psp_hdcp_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "HDCP: Failed to initialize HDCP\n"); + + ret = psp_dtm_initialize(psp); + if (ret) + dev_err(psp->adev->dev, + "DTM: Failed to initialize DTM\n"); } return 0; @@ -1064,7 +1467,10 @@ out: || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA5 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA6 || ucode->ucode_id == AMDGPU_UCODE_ID_SDMA7 - || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G)) + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_G + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM + || ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) /*skip ucode loading in SRIOV VF */ continue; @@ -1073,10 +1479,6 @@ out: ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT)) /* skip mec JT when autoload is enabled */ continue; - /* Renoir only needs to load mec jump table one time */ - if (adev->asic_type == CHIP_RENOIR && - ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) - continue; psp_print_fw_hdr(psp, ucode); @@ -1085,7 +1487,8 @@ out: return ret; /* Start rlc autoload after psp recieved all the gfx firmware */ - if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { + if (psp->autoload_supported && ucode->ucode_id == + AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) { ret = psp_rlc_autoload(psp); if (ret) { DRM_ERROR("Failed to start rlc autoload\n"); @@ -1210,8 +1613,11 @@ static int psp_hw_fini(void *handle) psp->xgmi_context.initialized == 1) psp_xgmi_terminate(psp); - if (psp->adev->psp.ta_fw) + if (psp->adev->psp.ta_fw) { psp_ras_terminate(psp); + psp_dtm_terminate(psp); + psp_hdcp_terminate(psp); + } psp_ring_destroy(psp, PSP_RING_TYPE__KM); @@ -1253,6 +1659,16 @@ static int psp_suspend(void *handle) DRM_ERROR("Failed to terminate ras ta\n"); return ret; } + ret = psp_hdcp_terminate(psp); + if (ret) { + DRM_ERROR("Failed to terminate hdcp ta\n"); + return ret; + } + ret = psp_dtm_terminate(psp); + if (ret) { + DRM_ERROR("Failed to terminate dtm ta\n"); + return ret; + } } ret = psp_ring_stop(psp, PSP_RING_TYPE__KM); @@ -1272,6 +1688,12 @@ static int psp_resume(void *handle) DRM_INFO("PSP is resuming...\n"); + ret = psp_mem_training(psp, PSP_MEM_TRAIN_RESUME); + if (ret) { + DRM_ERROR("Failed to process memory training!\n"); + return ret; + } + mutex_lock(&adev->firmware.mutex); ret = psp_hw_start(psp); @@ -1311,9 +1733,6 @@ int psp_rlc_autoload_start(struct psp_context *psp) int ret; struct psp_gfx_cmd_resp *cmd; - if (amdgpu_sriov_vf(psp->adev)) - return 0; - cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL); if (!cmd) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index bc0947f6bc8a..09c5474ebcc3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -37,6 +37,9 @@ #define PSP_RAS_SHARED_MEM_SIZE 0x4000 #define PSP_1_MEG 0x100000 #define PSP_TMR_SIZE 0x400000 +#define PSP_HDCP_SHARED_MEM_SIZE 0x4000 +#define PSP_DTM_SHARED_MEM_SIZE 0x4000 +#define PSP_SHARED_MEM_SIZE 0x4000 struct psp_context; struct psp_xgmi_node_info; @@ -46,6 +49,8 @@ enum 
psp_bootloader_cmd { PSP_BL__LOAD_SYSDRV = 0x10000, PSP_BL__LOAD_SOSDRV = 0x20000, PSP_BL__LOAD_KEY_DATABASE = 0x80000, + PSP_BL__DRAM_LONG_TRAIN = 0x100000, + PSP_BL__DRAM_SHORT_TRAIN = 0x200000, }; enum psp_ring_type @@ -108,6 +113,9 @@ struct psp_funcs struct ta_ras_trigger_error_input *info); int (*ras_cure_posion)(struct psp_context *psp, uint64_t *mode_ptr); int (*rlc_autoload_start)(struct psp_context *psp); + int (*mem_training_init)(struct psp_context *psp); + void (*mem_training_fini)(struct psp_context *psp); + int (*mem_training)(struct psp_context *psp, uint32_t ops); }; #define AMDGPU_XGMI_MAX_CONNECTED_NODES 64 @@ -142,6 +150,65 @@ struct psp_ras_context { struct amdgpu_ras *ras; }; +struct psp_hdcp_context { + bool hdcp_initialized; + uint32_t session_id; + struct amdgpu_bo *hdcp_shared_bo; + uint64_t hdcp_shared_mc_addr; + void *hdcp_shared_buf; +}; + +struct psp_dtm_context { + bool dtm_initialized; + uint32_t session_id; + struct amdgpu_bo *dtm_shared_bo; + uint64_t dtm_shared_mc_addr; + void *dtm_shared_buf; +}; + +#define MEM_TRAIN_SYSTEM_SIGNATURE 0x54534942 +#define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000 +#define GDDR6_MEM_TRAINING_OFFSET 0x8000 + +enum psp_memory_training_init_flag { + PSP_MEM_TRAIN_NOT_SUPPORT = 0x0, + PSP_MEM_TRAIN_SUPPORT = 0x1, + PSP_MEM_TRAIN_INIT_FAILED = 0x2, + PSP_MEM_TRAIN_RESERVE_SUCCESS = 0x4, + PSP_MEM_TRAIN_INIT_SUCCESS = 0x8, +}; + +enum psp_memory_training_ops { + PSP_MEM_TRAIN_SEND_LONG_MSG = 0x1, + PSP_MEM_TRAIN_SAVE = 0x2, + PSP_MEM_TRAIN_RESTORE = 0x4, + PSP_MEM_TRAIN_SEND_SHORT_MSG = 0x8, + PSP_MEM_TRAIN_COLD_BOOT = PSP_MEM_TRAIN_SEND_LONG_MSG, + PSP_MEM_TRAIN_RESUME = PSP_MEM_TRAIN_SEND_SHORT_MSG, +}; + +struct psp_memory_training_context { + /*training data size*/ + u64 train_data_size; + /* + * sys_cache + * cpu virtual address + * system memory buffer that used to store the training data. + */ + void *sys_cache; + + /*vram offset of the p2c training data*/ + u64 p2c_train_data_offset; + struct amdgpu_bo *p2c_bo; + + /*vram offset of the c2p training data*/ + u64 c2p_train_data_offset; + struct amdgpu_bo *c2p_bo; + + enum psp_memory_training_init_flag init; + u32 training_cnt; +}; + struct psp_context { struct amdgpu_device *adev; @@ -206,9 +273,21 @@ struct psp_context uint32_t ta_ras_ucode_version; uint32_t ta_ras_ucode_size; uint8_t *ta_ras_start_addr; + + uint32_t ta_hdcp_ucode_version; + uint32_t ta_hdcp_ucode_size; + uint8_t *ta_hdcp_start_addr; + + uint32_t ta_dtm_ucode_version; + uint32_t ta_dtm_ucode_size; + uint8_t *ta_dtm_start_addr; + struct psp_xgmi_context xgmi_context; struct psp_ras_context ras; + struct psp_hdcp_context hdcp_context; + struct psp_dtm_context dtm_context; struct mutex mutex; + struct psp_memory_training_context mem_train_ctx; }; struct amdgpu_psp_funcs { @@ -251,6 +330,12 @@ struct amdgpu_psp_funcs { (psp)->funcs->xgmi_set_topology_info((psp), (num_device), (topology)) : -EINVAL) #define psp_rlc_autoload(psp) \ ((psp)->funcs->rlc_autoload_start ? (psp)->funcs->rlc_autoload_start((psp)) : 0) +#define psp_mem_training_init(psp) \ + ((psp)->funcs->mem_training_init ? (psp)->funcs->mem_training_init((psp)) : 0) +#define psp_mem_training_fini(psp) \ + ((psp)->funcs->mem_training_fini ? (psp)->funcs->mem_training_fini((psp)) : 0) +#define psp_mem_training(psp, ops) \ + ((psp)->funcs->mem_training ? 
(psp)->funcs->mem_training((psp), (ops)) : 0) #define amdgpu_psp_check_fw_loading_status(adev, i) (adev)->firmware.funcs->check_fw_loading_status((adev), (i)) @@ -279,6 +364,8 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_ras_enable_features(struct psp_context *psp, union ta_ras_cmd_input *info, bool enable); +int psp_hdcp_invoke(struct psp_context *psp, uint32_t ta_cmd_id); +int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id); int psp_rlc_autoload_start(struct psp_context *psp); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 016ea274b955..404483437bd3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -25,10 +25,13 @@ #include <linux/list.h> #include <linux/module.h> #include <linux/uaccess.h> +#include <linux/reboot.h> +#include <linux/syscalls.h> #include "amdgpu.h" #include "amdgpu_ras.h" #include "amdgpu_atomfirmware.h" +#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" const char *ras_error_string[] = { "none", @@ -65,11 +68,16 @@ const char *ras_block_string[] = { /* inject address is 52 bits */ #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) -static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, - uint64_t offset, uint64_t size, - struct amdgpu_bo **bo_ptr); -static int amdgpu_ras_release_vram(struct amdgpu_device *adev, - struct amdgpu_bo **bo_ptr); +enum amdgpu_ras_retire_page_reservation { + AMDGPU_RAS_RETIRE_PAGE_RESERVED, + AMDGPU_RAS_RETIRE_PAGE_PENDING, + AMDGPU_RAS_RETIRE_PAGE_FAULT, +}; + +atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); + +static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, + uint64_t addr); static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, size_t size, loff_t *pos) @@ -189,6 +197,10 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, return 0; } + +static struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, + struct ras_common_if *head); + /** * DOC: AMDGPU RAS debugfs control interface * @@ -208,31 +220,44 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * As their names indicate, inject operation will write the * value to the address. * - * Second member: struct ras_debug_if::op. + * The second member: struct ras_debug_if::op. * It has three kinds of operations. - * 0: disable RAS on the block. Take ::head as its data. - * 1: enable RAS on the block. Take ::head as its data. - * 2: inject errors on the block. Take ::inject as its data. + * + * - 0: disable RAS on the block. Take ::head as its data. + * - 1: enable RAS on the block. Take ::head as its data. + * - 2: inject errors on the block. Take ::inject as its data. * * How to use the interface? - * programs: - * copy the struct ras_debug_if in your codes and initialize it. - * write the struct to the control node. * - * bash: - * echo op block [error [sub_blcok address value]] > .../ras/ras_ctrl - * op: disable, enable, inject - * disable: only block is needed - * enable: block and error are needed - * inject: error, address, value are needed - * block: umc, smda, gfx, ......... - * see ras_block_string[] for details - * error: ue, ce - * ue: multi_uncorrectable - * ce: single_correctable - * sub_block: sub block index, pass 0 if there is no sub block + * Programs + * + * Copy the struct ras_debug_if in your codes and initialize it. + * Write the struct to the control node. + * + * Shells + * + * .. 
code-block:: bash + * + * echo op block [error [sub_block address value]] > .../ras/ras_ctrl + * + * Parameters: + * + * op: disable, enable, inject + * disable: only block is needed + * enable: block and error are needed + * inject: error, address, value are needed + * block: umc, sdma, gfx, ......... + * see ras_block_string[] for details + * error: ue, ce + * ue: multi_uncorrectable + * ce: single_correctable + * sub_block: + * sub block index, pass 0 if there is no sub block + * + * here are some examples for bash commands: + * + * .. code-block:: bash * - * here are some examples for bash commands, * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl * echo inject umc ce 0 0 0 > /sys/kernel/debug/dri/0/ras/ras_ctrl * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl @@ -245,8 +270,11 @@ static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f, * For inject, please check corresponding err count at * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count * - * NOTE: operation is only allowed on blocks which are supported. - * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask + * .. note:: + * Operations are only allowed on blocks which are supported. + * Please check ras mask at /sys/module/amdgpu/parameters/ras_mask + * to see which blocks support RAS on a particular asic. + * */ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user *buf, size_t size, loff_t *pos) @@ -276,6 +304,14 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * break; } + /* umc ce/ue error injection for a bad page is not allowed */ + if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) && + amdgpu_ras_check_bad_page(adev, data.inject.address)) { + DRM_WARN("RAS WARN: 0x%llx has been marked as bad before error injection!\n", + data.inject.address); + break; + } + /* data.inject.address is offset instead of absolute gpu address */ ret = amdgpu_ras_error_inject(adev, &data.inject); break; @@ -290,6 +326,33 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, const char __user * return size; } +/** + * DOC: AMDGPU RAS debugfs EEPROM table reset interface + * + * Some boards contain an EEPROM which is used to persistently store a list of + * bad pages which experiences ECC errors in vram. This interface provides + * a way to reset the EEPROM, e.g., after testing error injection. + * + * Usage: + * + * .. code-block:: bash + * + * echo 1 > ../ras/ras_eeprom_reset + * + * will reset EEPROM table to 0 entries. + * + */ +static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; + int ret; + + ret = amdgpu_ras_eeprom_reset_table(&adev->psp.ras.ras->eeprom_control); + + return ret == 1 ? 
size : -EIO; +} + static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { .owner = THIS_MODULE, .read = NULL, @@ -297,6 +360,34 @@ static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { .llseek = default_llseek }; +static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = { + .owner = THIS_MODULE, + .read = NULL, + .write = amdgpu_ras_debugfs_eeprom_write, + .llseek = default_llseek +}; + +/** + * DOC: AMDGPU RAS sysfs Error Count Interface + * + * It allows the user to read the error count for each IP block on the gpu through + * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count + * + * It outputs the multiple lines which report the uncorrected (ue) and corrected + * (ce) error counts. + * + * The format of one line is below, + * + * [ce|ue]: count + * + * Example: + * + * .. code-block:: bash + * + * ue: 0 + * ce: 1 + * + */ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, struct device_attribute *attr, char *buf) { @@ -475,15 +566,17 @@ int amdgpu_ras_feature_enable(struct amdgpu_device *adev, if (!(!!enable ^ !!amdgpu_ras_is_feature_enabled(adev, head))) return 0; - ret = psp_ras_enable_features(&adev->psp, &info, enable); - if (ret) { - DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n", - enable ? "enable":"disable", - ras_block_str(head->block), - ret); - if (ret == TA_RAS_STATUS__RESET_NEEDED) - return -EAGAIN; - return -EINVAL; + if (!amdgpu_ras_intr_triggered()) { + ret = psp_ras_enable_features(&adev->psp, &info, enable); + if (ret) { + DRM_ERROR("RAS ERROR: %s %s feature failed ret %d\n", + enable ? "enable":"disable", + ras_block_str(head->block), + ret); + if (ret == TA_RAS_STATUS__RESET_NEEDED) + return -EAGAIN; + return -EINVAL; + } } /* setup the obj */ @@ -615,8 +708,12 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, adev->gfx.funcs->query_ras_error_count(adev, &err_data); break; case AMDGPU_RAS_BLOCK__MMHUB: - if (adev->mmhub_funcs->query_ras_error_count) - adev->mmhub_funcs->query_ras_error_count(adev, &err_data); + if (adev->mmhub.funcs->query_ras_error_count) + adev->mmhub.funcs->query_ras_error_count(adev, &err_data); + break; + case AMDGPU_RAS_BLOCK__PCIE_BIF: + if (adev->nbio.funcs->query_ras_error_count) + adev->nbio.funcs->query_ras_error_count(adev, &err_data); break; default: break; @@ -628,12 +725,14 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev, info->ue_count = obj->err_data.ue_count; info->ce_count = obj->err_data.ce_count; - if (err_data.ce_count) + if (err_data.ce_count) { dev_info(adev->dev, "%ld correctable errors detected in %s block\n", obj->err_data.ce_count, ras_block_str(info->head.block)); - if (err_data.ue_count) + } + if (err_data.ue_count) { dev_info(adev->dev, "%ld uncorrectable errors detected in %s block\n", obj->err_data.ue_count, ras_block_str(info->head.block)); + } return 0; } @@ -664,6 +763,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, break; case AMDGPU_RAS_BLOCK__UMC: case AMDGPU_RAS_BLOCK__MMHUB: + case AMDGPU_RAS_BLOCK__XGMI_WAFL: + case AMDGPU_RAS_BLOCK__PCIE_BIF: ret = psp_ras_trigger_error(&adev->psp, &block_info); break; default: @@ -723,18 +824,18 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, static char *amdgpu_ras_badpage_flags_str(unsigned int flags) { switch (flags) { - case 0: + case AMDGPU_RAS_RETIRE_PAGE_RESERVED: return "R"; - case 1: + case AMDGPU_RAS_RETIRE_PAGE_PENDING: return "P"; - case 2: + case AMDGPU_RAS_RETIRE_PAGE_FAULT: default: return "F"; }; } -/* - * DOC: ras sysfs gpu_vram_bad_pages 
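The ras_ctrl control node and the per-block error-count files documented in these kernel-doc blocks can be exercised from a small userspace helper just as well as from the shell. A minimal sketch, reusing the exact command string from the bash example above and assuming the documented paths for card 0; the helper name, buffer size and paths are illustrative only, not part of the driver:

.. code-block:: c

    /* Hypothetical userspace helper: inject a UMC UE error through the
     * ras_ctrl debugfs node, then read back the per-block counters that
     * sysfs reports as "ue: <n>" / "ce: <n>" lines. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static int write_str(const char *path, const char *cmd)
    {
        int fd = open(path, O_WRONLY);
        ssize_t n;

        if (fd < 0)
            return -1;
        n = write(fd, cmd, strlen(cmd));
        close(fd);
        return n == (ssize_t)strlen(cmd) ? 0 : -1;
    }

    int main(void)
    {
        char buf[128];
        ssize_t n;
        int fd;

        /* same effect as: echo inject umc ue 0x0 0x0 0x0 > .../ras/ras_ctrl */
        if (write_str("/sys/kernel/debug/dri/0/ras/ras_ctrl",
                      "inject umc ue 0x0 0x0 0x0") < 0)
            perror("ras_ctrl");

        fd = open("/sys/class/drm/card0/device/ras/umc_err_count", O_RDONLY);
        if (fd < 0)
            return 1;
        n = read(fd, buf, sizeof(buf) - 1);
        if (n > 0) {
            buf[n] = '\0';
            fputs(buf, stdout);
        }
        close(fd);
        return 0;
    }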
interface +/** + * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface * * It allows user to read the bad pages of vram on the gpu through * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages @@ -746,14 +847,21 @@ static char *amdgpu_ras_badpage_flags_str(unsigned int flags) * * gpu pfn and gpu page size are printed in hex format. * flags can be one of below character, + * * R: reserved, this gpu page is reserved and not able to use. + * * P: pending for reserve, this gpu page is marked as bad, will be reserved - * in next window of page_reserve. + * in next window of page_reserve. + * * F: unable to reserve. this gpu page can't be reserved due to some reasons. * - * examples: - * 0x00000001 : 0x00001000 : R - * 0x00000002 : 0x00001000 : P + * Examples: + * + * .. code-block:: bash + * + * 0x00000001 : 0x00001000 : R + * 0x00000002 : 0x00001000 : P + * */ static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, @@ -927,6 +1035,24 @@ static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) } /* sysfs end */ +/** + * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors + * + * Normally when there is an uncorrectable error, the driver will reset + * the GPU to recover. However, in the event of an unrecoverable error, + * the driver provides an interface to reboot the system automatically + * in that event. + * + * The following file in debugfs provides that interface: + * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot + * + * Usage: + * + * .. code-block:: bash + * + * echo true > .../ras/auto_reboot + * + */ /* debugfs begin */ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) { @@ -934,8 +1060,21 @@ static void amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) struct drm_minor *minor = adev->ddev->primary; con->dir = debugfs_create_dir("ras", minor->debugfs_root); - con->ent = debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir, - adev, &amdgpu_ras_debugfs_ctrl_ops); + debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, con->dir, + adev, &amdgpu_ras_debugfs_ctrl_ops); + debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, con->dir, + adev, &amdgpu_ras_debugfs_eeprom_ops); + + /* + * After one uncorrectable error happens, usually GPU recovery will + * be scheduled. But due to the known problem in GPU recovery failing + * to bring GPU back, below interface provides one direct way to + * user to reboot system automatically in such case within + * ERREVENT_ATHUB_INTERRUPT generated. Normal GPU recovery routine + * will never be called. 
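Given the "gpu_pfn : gpu_page_size : flags" layout documented for gpu_vram_bad_pages above, the bad-page list can be decoded with a few lines of parsing. A sketch that assumes one record per line exactly as in the examples; the sysfs path and the use of fscanf are illustrative, not dictated by the driver:

.. code-block:: c

    /* Hypothetical parser for the gpu_vram_bad_pages file.  Each line is
     * "0x<pfn> : 0x<size> : <R|P|F>" as shown in the examples above. */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("/sys/class/drm/card0/device/ras/gpu_vram_bad_pages", "r");
        unsigned long long pfn, size;
        char flag;

        if (!f) {
            perror("gpu_vram_bad_pages");
            return 1;
        }
        while (fscanf(f, " 0x%llx : 0x%llx : %c", &pfn, &size, &flag) == 3)
            printf("pfn=0x%llx size=0x%llx %s\n", pfn, size,
                   flag == 'R' ? "reserved" :
                   flag == 'P' ? "pending"  : "fault");
        fclose(f);
        return 0;
    }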
+ */ + debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, con->dir, + &con->reboot); } void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, @@ -980,10 +1119,8 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev) amdgpu_ras_debugfs_remove(adev, &obj->head); } - debugfs_remove(con->ent); - debugfs_remove(con->dir); + debugfs_remove_recursive(con->dir); con->dir = NULL; - con->ent = NULL; } /* debugfs end */ @@ -1188,15 +1325,15 @@ static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, for (; i < data->count; i++) { (*bps)[i] = (struct ras_badpage){ - .bp = data->bps[i].bp, + .bp = data->bps[i].retired_page, .size = AMDGPU_GPU_PAGE_SIZE, - .flags = 0, + .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, }; if (data->last_reserved <= i) - (*bps)[i].flags = 1; - else if (data->bps[i].bo == NULL) - (*bps)[i].flags = 2; + (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; + else if (data->bps_bo[i] == NULL) + (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; } *count = data->count; @@ -1214,105 +1351,46 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) atomic_set(&ras->in_recovery, 0); } -static int amdgpu_ras_release_vram(struct amdgpu_device *adev, - struct amdgpu_bo **bo_ptr) -{ - /* no need to free it actually. */ - amdgpu_bo_free_kernel(bo_ptr, NULL, NULL); - return 0; -} - -/* reserve vram with size@offset */ -static int amdgpu_ras_reserve_vram(struct amdgpu_device *adev, - uint64_t offset, uint64_t size, - struct amdgpu_bo **bo_ptr) -{ - struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_bo_param bp; - int r = 0; - int i; - struct amdgpu_bo *bo; - - if (bo_ptr) - *bo_ptr = NULL; - memset(&bp, 0, sizeof(bp)); - bp.size = size; - bp.byte_align = PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; - bp.flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS | - AMDGPU_GEM_CREATE_NO_CPU_ACCESS; - bp.type = ttm_bo_type_kernel; - bp.resv = NULL; - - r = amdgpu_bo_create(adev, &bp, &bo); - if (r) - return -EINVAL; - - r = amdgpu_bo_reserve(bo, false); - if (r) - goto error_reserve; - - offset = ALIGN(offset, PAGE_SIZE); - for (i = 0; i < bo->placement.num_placement; ++i) { - bo->placements[i].fpfn = offset >> PAGE_SHIFT; - bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; - } - - ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); - r = ttm_bo_mem_space(&bo->tbo, &bo->placement, &bo->tbo.mem, &ctx); - if (r) - goto error_pin; - - r = amdgpu_bo_pin_restricted(bo, - AMDGPU_GEM_DOMAIN_VRAM, - offset, - offset + size); - if (r) - goto error_pin; - - if (bo_ptr) - *bo_ptr = bo; - - amdgpu_bo_unreserve(bo); - return r; - -error_pin: - amdgpu_bo_unreserve(bo); -error_reserve: - amdgpu_bo_unref(&bo); - return r; -} - /* alloc/realloc bps array */ static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, struct ras_err_handler_data *data, int pages) { unsigned int old_space = data->count + data->space_left; unsigned int new_space = old_space + pages; - unsigned int align_space = ALIGN(new_space, 1024); - void *tmp = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); - - if (!tmp) + unsigned int align_space = ALIGN(new_space, 512); + void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL); + struct amdgpu_bo **bps_bo = + kmalloc(align_space * sizeof(*data->bps_bo), GFP_KERNEL); + + if (!bps || !bps_bo) { + kfree(bps); + kfree(bps_bo); return -ENOMEM; + } if (data->bps) { - memcpy(tmp, data->bps, + memcpy(bps, data->bps, data->count * sizeof(*data->bps)); kfree(data->bps); } + if (data->bps_bo) { + memcpy(bps_bo, data->bps_bo, + data->count * 
sizeof(*data->bps_bo)); + kfree(data->bps_bo); + } - data->bps = tmp; + data->bps = bps; + data->bps_bo = bps_bo; data->space_left += align_space - old_space; return 0; } /* it deal with vram only. */ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, - unsigned long *bps, int pages) + struct eeprom_table_record *bps, int pages) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; - int i = pages; int ret = 0; if (!con || !con->eh_data || !bps || pages <= 0) @@ -1329,24 +1407,120 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, goto out; } - while (i--) - data->bps[data->count++].bp = bps[i]; - + memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps)); + data->count += pages; data->space_left -= pages; + out: mutex_unlock(&con->recovery_lock); return ret; } +/* + * write error record array to eeprom, the function should be + * protected by recovery_lock + */ +static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_err_handler_data *data; + struct amdgpu_ras_eeprom_control *control; + int save_count; + + if (!con || !con->eh_data) + return 0; + + control = &con->eeprom_control; + data = con->eh_data; + save_count = data->count - control->num_recs; + /* only new entries are saved */ + if (save_count > 0) + if (amdgpu_ras_eeprom_process_recods(control, + &data->bps[control->num_recs], + true, + save_count)) { + DRM_ERROR("Failed to save EEPROM table data!"); + return -EIO; + } + + return 0; +} + +/* + * read error record array in eeprom and reserve enough space for + * storing new bad pages + */ +static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) +{ + struct amdgpu_ras_eeprom_control *control = + &adev->psp.ras.ras->eeprom_control; + struct eeprom_table_record *bps = NULL; + int ret = 0; + + /* no bad page record, skip eeprom access */ + if (!control->num_recs) + return ret; + + bps = kcalloc(control->num_recs, sizeof(*bps), GFP_KERNEL); + if (!bps) + return -ENOMEM; + + if (amdgpu_ras_eeprom_process_recods(control, bps, false, + control->num_recs)) { + DRM_ERROR("Failed to load EEPROM table records!"); + ret = -EIO; + goto out; + } + + ret = amdgpu_ras_add_bad_pages(adev, bps, control->num_recs); + +out: + kfree(bps); + return ret; +} + +/* + * check if an address belongs to bad page + * + * Note: this check is only for umc block + */ +static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, + uint64_t addr) +{ + struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + struct ras_err_handler_data *data; + int i; + bool ret = false; + + if (!con || !con->eh_data) + return ret; + + mutex_lock(&con->recovery_lock); + data = con->eh_data; + if (!data) + goto out; + + addr >>= AMDGPU_GPU_PAGE_SHIFT; + for (i = 0; i < data->count; i++) + if (addr == data->bps[i].retired_page) { + ret = true; + goto out; + } + +out: + mutex_unlock(&con->recovery_lock); + return ret; +} + /* called in gpu recovery/init */ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data; uint64_t bp; - struct amdgpu_bo *bo; - int i; + struct amdgpu_bo *bo = NULL; + int i, ret = 0; if (!con || !con->eh_data) return 0; @@ -1357,18 +1531,29 @@ int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev) goto out; /* reserve vram at driver post stage. 
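amdgpu_ras_realloc_eh_data_space now grows two parallel arrays in lockstep (the eeprom_table_record entries and the reserved-BO pointers), rounding the capacity up so that repeated single-page additions do not reallocate every time. A condensed userspace sketch of that pattern; the field names and the 512-entry rounding mirror the code above, everything else (plain malloc, the void * handle type, the tiny main) is illustrative:

.. code-block:: c

    /* Grow two parallel arrays together, keeping the capacity aligned to
     * 512 entries; on allocation failure neither array is modified. */
    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

    struct record { uint64_t retired_page; };

    struct eh_data {
        struct record *bps;   /* bad page records */
        void **bps_bo;        /* matching reservation handles */
        int count;
        int space_left;
    };

    static int realloc_space(struct eh_data *d, int pages)
    {
        int old_space = d->count + d->space_left;
        int new_space = ALIGN_UP(old_space + pages, 512);
        struct record *bps = malloc(new_space * sizeof(*bps));
        void **bps_bo = malloc(new_space * sizeof(*bps_bo));

        if (!bps || !bps_bo) {
            free(bps);
            free(bps_bo);
            return -1;
        }
        if (d->bps) {
            memcpy(bps, d->bps, d->count * sizeof(*bps));
            free(d->bps);
        }
        if (d->bps_bo) {
            memcpy(bps_bo, d->bps_bo, d->count * sizeof(*bps_bo));
            free(d->bps_bo);
        }
        d->bps = bps;
        d->bps_bo = bps_bo;
        d->space_left += new_space - old_space;
        return 0;
    }

    int main(void)
    {
        struct eh_data d = { 0 };

        if (realloc_space(&d, 1) == 0)   /* capacity jumps straight to 512 */
            d.bps[d.count++].retired_page = 0x1000;
        free(d.bps);
        free(d.bps_bo);
        return 0;
    }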
*/ for (i = data->last_reserved; i < data->count; i++) { - bp = data->bps[i].bp; + bp = data->bps[i].retired_page; - if (amdgpu_ras_reserve_vram(adev, bp << PAGE_SHIFT, - PAGE_SIZE, &bo)) - DRM_ERROR("RAS ERROR: reserve vram %llx fail\n", bp); + /* There are two cases of reserve error should be ignored: + * 1) a ras bad page has been allocated (used by someone); + * 2) a ras bad page has been reserved (duplicate error injection + * for one page); + */ + if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, + AMDGPU_GPU_PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &bo, NULL)) + DRM_WARN("RAS WARN: reserve vram for retired page %llx fail\n", bp); - data->bps[i].bo = bo; + data->bps_bo[i] = bo; data->last_reserved = i + 1; + bo = NULL; } + + /* continue to save bad pages to eeprom even reesrve_vram fails */ + ret = amdgpu_ras_save_bad_pages(adev); out: mutex_unlock(&con->recovery_lock); - return 0; + return ret; } /* called when driver unload */ @@ -1388,11 +1573,11 @@ static int amdgpu_ras_release_bad_pages(struct amdgpu_device *adev) goto out; for (i = data->last_reserved - 1; i >= 0; i--) { - bo = data->bps[i].bo; + bo = data->bps_bo[i]; - amdgpu_ras_release_vram(adev, &bo); + amdgpu_bo_free_kernel(&bo, NULL, NULL); - data->bps[i].bo = bo; + data->bps_bo[i] = bo; data->last_reserved = i; } out: @@ -1400,41 +1585,54 @@ out: return 0; } -static int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev) -{ - /* TODO - * write the array to eeprom when SMU disabled. - */ - return 0; -} - -static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) -{ - /* TODO - * read the array to eeprom when SMU disabled. - */ - return 0; -} - -static int amdgpu_ras_recovery_init(struct amdgpu_device *adev) +int amdgpu_ras_recovery_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); - struct ras_err_handler_data **data = &con->eh_data; + struct ras_err_handler_data **data; + int ret; - *data = kmalloc(sizeof(**data), - GFP_KERNEL|__GFP_ZERO); - if (!*data) - return -ENOMEM; + if (con) + data = &con->eh_data; + else + return 0; + + *data = kmalloc(sizeof(**data), GFP_KERNEL | __GFP_ZERO); + if (!*data) { + ret = -ENOMEM; + goto out; + } mutex_init(&con->recovery_lock); INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); atomic_set(&con->in_recovery, 0); con->adev = adev; - amdgpu_ras_load_bad_pages(adev); - amdgpu_ras_reserve_bad_pages(adev); + ret = amdgpu_ras_eeprom_init(&con->eeprom_control); + if (ret) + goto free; + + if (con->eeprom_control.num_recs) { + ret = amdgpu_ras_load_bad_pages(adev); + if (ret) + goto free; + ret = amdgpu_ras_reserve_bad_pages(adev); + if (ret) + goto release; + } return 0; + +release: + amdgpu_ras_release_bad_pages(adev); +free: + kfree((*data)->bps); + kfree((*data)->bps_bo); + kfree(*data); + con->eh_data = NULL; +out: + DRM_WARN("Failed to initialize ras recovery!\n"); + + return ret; } static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) @@ -1442,13 +1640,17 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_err_handler_data *data = con->eh_data; + /* recovery_init failed to init it, fini is useless */ + if (!data) + return 0; + cancel_work_sync(&con->recovery_work); - amdgpu_ras_save_bad_pages(adev); amdgpu_ras_release_bad_pages(adev); mutex_lock(&con->recovery_lock); con->eh_data = NULL; kfree(data->bps); + kfree(data->bps_bo); kfree(data); mutex_unlock(&con->recovery_lock); @@ -1500,6 +1702,7 @@ static void 
amdgpu_ras_check_supported(struct amdgpu_device *adev, int amdgpu_ras_init(struct amdgpu_device *adev) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); + int r; if (con) return 0; @@ -1527,31 +1730,106 @@ int amdgpu_ras_init(struct amdgpu_device *adev) /* Might need get this flag from vbios. */ con->flags = RAS_DEFAULT_FLAGS; - if (amdgpu_ras_recovery_init(adev)) - goto recovery_out; + if (adev->nbio.funcs->init_ras_controller_interrupt) { + r = adev->nbio.funcs->init_ras_controller_interrupt(adev); + if (r) + return r; + } + + if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) { + r = adev->nbio.funcs->init_ras_err_event_athub_interrupt(adev); + if (r) + return r; + } amdgpu_ras_mask &= AMDGPU_RAS_BLOCK_MASK; if (amdgpu_ras_fs_init(adev)) goto fs_out; - /* ras init for each ras block */ - if (adev->umc.funcs->ras_init) - adev->umc.funcs->ras_init(adev); - DRM_INFO("RAS INFO: ras initialized successfully, " "hardware ability[%x] ras_mask[%x]\n", con->hw_supported, con->supported); return 0; fs_out: - amdgpu_ras_recovery_fini(adev); -recovery_out: amdgpu_ras_set_context(adev, NULL); kfree(con); return -EINVAL; } +/* helper function to handle common stuff in ip late init phase */ +int amdgpu_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_fs_if *fs_info, + struct ras_ih_if *ih_info) +{ + int r; + + /* disable RAS feature per IP block if it is not supported */ + if (!amdgpu_ras_is_supported(adev, ras_block->block)) { + amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); + return 0; + } + + r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1); + if (r) { + if (r == -EAGAIN) { + /* request gpu reset. will run again */ + amdgpu_ras_request_reset_on_boot(adev, + ras_block->block); + return 0; + } else if (adev->in_suspend || adev->in_gpu_reset) { + /* in resume phase, if fail to enable ras, + * clean up all ras fs nodes, and disable ras */ + goto cleanup; + } else + return r; + } + + /* in resume phase, no need to create ras fs node */ + if (adev->in_suspend || adev->in_gpu_reset) + return 0; + + if (ih_info->cb) { + r = amdgpu_ras_interrupt_add_handler(adev, ih_info); + if (r) + goto interrupt; + } + + amdgpu_ras_debugfs_create(adev, fs_info); + + r = amdgpu_ras_sysfs_create(adev, fs_info); + if (r) + goto sysfs; + + return 0; +cleanup: + amdgpu_ras_sysfs_remove(adev, ras_block); +sysfs: + amdgpu_ras_debugfs_remove(adev, ras_block); + if (ih_info->cb) + amdgpu_ras_interrupt_remove_handler(adev, ih_info); +interrupt: + amdgpu_ras_feature_enable(adev, ras_block, 0); + return r; +} + +/* helper function to remove ras fs node and interrupt handler */ +void amdgpu_ras_late_fini(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_ih_if *ih_info) +{ + if (!ras_block || !ih_info) + return; + + amdgpu_ras_sysfs_remove(adev, ras_block); + amdgpu_ras_debugfs_remove(adev, ras_block); + if (ih_info->cb) + amdgpu_ras_interrupt_remove_handler(adev, ih_info); + amdgpu_ras_feature_enable(adev, ras_block, 0); +} + /* do some init work after IP late init as dependence. * and it runs in resume/gpu reset/booting up cases. 
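amdgpu_ras_late_init and its error path follow the usual staged-init/goto-unwind idiom: each completed step has a label that tears it down, and a failure jumps to the label that undoes everything completed so far, in reverse order. A minimal standalone illustration of that control flow; the three stages here are placeholders, not the driver's real interrupt/debugfs/sysfs steps:

.. code-block:: c

    /* Staged initialization with a reverse-order unwind ladder, the same
     * shape as the goto chain in amdgpu_ras_late_init() above. */
    #include <stdio.h>

    static int step_ok(const char *what) { printf("init %s\n", what); return 0; }
    static void undo(const char *what)   { printf("undo %s\n", what); }

    static int late_init(int fail_at)
    {
        int r;

        r = fail_at == 1 ? -1 : step_ok("interrupt handler");
        if (r)
            goto out;

        r = fail_at == 2 ? -1 : step_ok("debugfs node");
        if (r)
            goto undo_irq;

        r = fail_at == 3 ? -1 : step_ok("sysfs node");
        if (r)
            goto undo_debugfs;

        return 0;

    undo_debugfs:
        undo("debugfs node");
    undo_irq:
        undo("interrupt handler");
    out:
        return r;
    }

    int main(void)
    {
        return late_init(3) ? 1 : 0;   /* exercise the unwind path */
    }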
*/ @@ -1645,3 +1923,18 @@ int amdgpu_ras_fini(struct amdgpu_device *adev) return 0; } + +void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) +{ + uint32_t hw_supported, supported; + + amdgpu_ras_check_supported(adev, &hw_supported, &supported); + if (!hw_supported) + return; + + if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) { + DRM_WARN("RAS event of type ERREVENT_ATHUB_INTERRUPT detected!\n"); + + amdgpu_ras_reset_gpu(adev, false); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 6c76bb2a6843..f80fd3428c98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -317,8 +317,6 @@ struct amdgpu_ras { struct list_head head; /* debugfs */ struct dentry *dir; - /* debugfs ctrl */ - struct dentry *ent; /* sysfs */ struct device_attribute features_attr; struct bin_attribute badpages_attr; @@ -334,7 +332,7 @@ struct amdgpu_ras { struct mutex recovery_lock; uint32_t flags; - + bool reboot; struct amdgpu_ras_eeprom_control eeprom_control; }; @@ -347,15 +345,14 @@ struct ras_err_data { unsigned long ue_count; unsigned long ce_count; unsigned long err_addr_cnt; - uint64_t *err_addr; + struct eeprom_table_record *err_addr; }; struct ras_err_handler_data { - /* point to bad pages array */ - struct { - unsigned long bp; - struct amdgpu_bo *bo; - } *bps; + /* point to bad page records array */ + struct eeprom_table_record *bps; + /* point to reserved bo array */ + struct amdgpu_bo **bps_bo; /* the count of entries */ int count; /* the space can place new entries */ @@ -365,7 +362,7 @@ struct ras_err_handler_data { }; typedef int (*ras_ih_cb)(struct amdgpu_device *adev, - struct ras_err_data *err_data, + void *err_data, struct amdgpu_iv_entry *entry); struct ras_ih_data { @@ -481,6 +478,7 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev, return ras && (ras->supported & (1 << block)); } +int amdgpu_ras_recovery_init(struct amdgpu_device *adev); int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, unsigned int block); @@ -492,7 +490,7 @@ unsigned long amdgpu_ras_query_error_count(struct amdgpu_device *adev, /* error handling functions */ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, - unsigned long *bps, int pages); + struct eeprom_table_record *bps, int pages); int amdgpu_ras_reserve_bad_pages(struct amdgpu_device *adev); @@ -501,6 +499,12 @@ static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev, { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + /* save bad page to eeprom before gpu reset, + * i2c may be unstable in gpu reset + */ + if (in_task()) + amdgpu_ras_reserve_bad_pages(adev); + if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) schedule_work(&ras->recovery_work); return 0; @@ -566,6 +570,13 @@ amdgpu_ras_error_to_ta(enum amdgpu_ras_error_type error) { int amdgpu_ras_init(struct amdgpu_device *adev); int amdgpu_ras_fini(struct amdgpu_device *adev); int amdgpu_ras_pre_fini(struct amdgpu_device *adev); +int amdgpu_ras_late_init(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_fs_if *fs_info, + struct ras_ih_if *ih_info); +void amdgpu_ras_late_fini(struct amdgpu_device *adev, + struct ras_common_if *ras_block, + struct ras_ih_if *ih_info); int amdgpu_ras_feature_enable(struct amdgpu_device *adev, struct ras_common_if *head, bool enable); @@ -599,4 +610,14 @@ int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, struct 
ras_dispatch_if *info); + +extern atomic_t amdgpu_ras_in_intr; + +static inline bool amdgpu_ras_intr_triggered(void) +{ + return !!atomic_read(&amdgpu_ras_in_intr); +} + +void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 8a32b5c93778..7de16c0c2f20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -100,7 +100,101 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control, return ret; } -static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control); + + +static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control) +{ + int i; + uint32_t tbl_sum = 0; + + /* Header checksum, skip checksum field in the calculation */ + for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++) + tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i); + + return tbl_sum; +} + +static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records, + int num) +{ + int i, j; + uint32_t tbl_sum = 0; + + /* Records checksum */ + for (i = 0; i < num; i++) { + struct eeprom_table_record *record = &records[i]; + + for (j = 0; j < sizeof(*record); j++) { + tbl_sum += *(((unsigned char *)record) + j); + } + } + + return tbl_sum; +} + +static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *records, int num) +{ + return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num); +} + +/* Checksum = 256 -((sum of all table entries) mod 256) */ +static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *records, int num, + uint32_t old_hdr_byte_sum) +{ + /* + * This will update the table sum with new records. + * + * TODO: What happens when the EEPROM table is to be wrapped around + * and old records from start will get overridden. 
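The EEPROM table checksum introduced here is a simple byte-sum complement: every byte of the header (minus the checksum field itself) plus every byte of every record is summed, the stored value is 256 - (sum mod 256), and on read-back (sum mod 256) + checksum must equal 256. A self-contained sketch of that arithmetic, with the record layout reduced to a single field and a string standing in for the real header:

.. code-block:: c

    /* Byte-sum complement checksum as used for the RAS EEPROM table:
     * checksum = 256 - ((sum of all table bytes) mod 256). */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t byte_sum(const void *p, size_t len)
    {
        const unsigned char *b = p;
        uint32_t sum = 0;

        while (len--)
            sum += *b++;
        return sum;
    }

    struct record { uint64_t retired_page; };

    int main(void)
    {
        struct record recs[2] = { { 0x1000 }, { 0x2000 } };
        uint32_t hdr_sum = byte_sum("HDR", 3);   /* stand-in for the header */
        uint32_t tbl_sum = hdr_sum + byte_sum(recs, sizeof(recs));
        uint32_t checksum = 256 - (tbl_sum % 256);

        /* validation: table sum mod 256 plus checksum must equal 256 */
        printf("checksum=%u valid=%d\n", checksum,
               (checksum + tbl_sum % 256) == 256);
        return 0;
    }

Because only a byte-wide complement is kept, the scheme catches simple corruption of individual bytes but, by construction, cannot detect records being reordered; it is a sanity check rather than an integrity guarantee.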
+ */ + + /* need to recalculate updated header byte sum */ + control->tbl_byte_sum -= old_hdr_byte_sum; + control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num); + + control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256); +} + +/* table sum mod 256 + checksum must equals 256 */ +static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control, + struct eeprom_table_record *records, int num) +{ + control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num); + + if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) { + DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum); + return false; + } + + return true; +} + +int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control) +{ + unsigned char buff[EEPROM_ADDRESS_SIZE + EEPROM_TABLE_HEADER_SIZE] = { 0 }; + struct amdgpu_ras_eeprom_table_header *hdr = &control->tbl_hdr; + int ret = 0; + + mutex_lock(&control->tbl_mutex); + + hdr->header = EEPROM_TABLE_HDR_VAL; + hdr->version = EEPROM_TABLE_VER; + hdr->first_rec_offset = EEPROM_RECORD_START; + hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE; + + control->tbl_byte_sum = 0; + __update_tbl_checksum(control, NULL, 0, 0); + control->next_addr = EEPROM_RECORD_START; + + ret = __update_table_header(control, buff); + + mutex_unlock(&control->tbl_mutex); + + return ret; + +} int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) { @@ -122,6 +216,10 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor); break; + case CHIP_ARCTURUS: + ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor); + break; + default: return 0; } @@ -143,25 +241,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control) if (hdr->header == EEPROM_TABLE_HDR_VAL) { control->num_recs = (hdr->tbl_size - EEPROM_TABLE_HEADER_SIZE) / EEPROM_TABLE_RECORD_SIZE; + control->tbl_byte_sum = __calc_hdr_byte_sum(control); + control->next_addr = EEPROM_RECORD_START; + DRM_DEBUG_DRIVER("Found existing EEPROM table with %d records", control->num_recs); } else { DRM_INFO("Creating new EEPROM table"); - hdr->header = EEPROM_TABLE_HDR_VAL; - hdr->version = EEPROM_TABLE_VER; - hdr->first_rec_offset = EEPROM_RECORD_START; - hdr->tbl_size = EEPROM_TABLE_HEADER_SIZE; - - adev->psp.ras.ras->eeprom_control.tbl_byte_sum = - __calc_hdr_byte_sum(&adev->psp.ras.ras->eeprom_control); - ret = __update_table_header(control, buff); + ret = amdgpu_ras_eeprom_reset_table(control); } - /* Start inserting records from here */ - adev->psp.ras.ras->eeprom_control.next_addr = EEPROM_RECORD_START; - return ret == 1 ? 
0 : -EIO; } @@ -173,6 +264,9 @@ void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control) case CHIP_VEGA20: smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor); break; + case CHIP_ARCTURUS: + smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor); + break; default: return; @@ -226,8 +320,8 @@ static void __decode_table_record_from_buff(struct amdgpu_ras_eeprom_control *co record->offset = (le64_to_cpu(tmp) & 0xffffffffffff); i += 6; - buff[i++] = record->mem_channel; - buff[i++] = record->mcumc_id; + record->mem_channel = buff[i++]; + record->mcumc_id = buff[i++]; memcpy(&tmp, buff + i, 6); record->retired_page = (le64_to_cpu(tmp) & 0xffffffffffff); @@ -266,87 +360,18 @@ static uint32_t __correct_eeprom_dest_address(uint32_t curr_address) return curr_address; } - -static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control) -{ - int i; - uint32_t tbl_sum = 0; - - /* Header checksum, skip checksum field in the calculation */ - for (i = 0; i < sizeof(control->tbl_hdr) - sizeof(control->tbl_hdr.checksum); i++) - tbl_sum += *(((unsigned char *)&control->tbl_hdr) + i); - - return tbl_sum; -} - -static uint32_t __calc_recs_byte_sum(struct eeprom_table_record *records, - int num) -{ - int i, j; - uint32_t tbl_sum = 0; - - /* Records checksum */ - for (i = 0; i < num; i++) { - struct eeprom_table_record *record = &records[i]; - - for (j = 0; j < sizeof(*record); j++) { - tbl_sum += *(((unsigned char *)record) + j); - } - } - - return tbl_sum; -} - -static inline uint32_t __calc_tbl_byte_sum(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, int num) -{ - return __calc_hdr_byte_sum(control) + __calc_recs_byte_sum(records, num); -} - -/* Checksum = 256 -((sum of all table entries) mod 256) */ -static void __update_tbl_checksum(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, int num, - uint32_t old_hdr_byte_sum) -{ - /* - * This will update the table sum with new records. - * - * TODO: What happens when the EEPROM table is to be wrapped around - * and old records from start will get overridden. 
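The record encode/decode helpers pack 48-bit quantities (the error offset, limited to 52 bits, and the retired page) into six bytes of the EEPROM buffer; the hunk above also fixes the decode path, which previously wrote the record fields back into the buffer instead of reading them. A standalone sketch of the six-byte little-endian pack/unpack step; the field name follows eeprom_table_record, everything else is illustrative:

.. code-block:: c

    /* Pack and unpack a 48-bit value into six little-endian bytes, the way
     * record offsets and retired pages are stored in the EEPROM buffer. */
    #include <stdint.h>
    #include <stdio.h>

    static void put48(unsigned char *buf, uint64_t v)
    {
        int i;

        for (i = 0; i < 6; i++)
            buf[i] = (v >> (8 * i)) & 0xff;
    }

    static uint64_t get48(const unsigned char *buf)
    {
        uint64_t v = 0;
        int i;

        for (i = 0; i < 6; i++)
            v |= (uint64_t)buf[i] << (8 * i);
        return v;   /* equivalent to le64_to_cpu(tmp) & 0xffffffffffff */
    }

    int main(void)
    {
        unsigned char buf[6];
        uint64_t retired_page = 0x123456789abcULL;

        put48(buf, retired_page);
        printf("roundtrip ok: %d\n", get48(buf) == retired_page);
        return 0;
    }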
- */ - - /* need to recalculate updated header byte sum */ - control->tbl_byte_sum -= old_hdr_byte_sum; - control->tbl_byte_sum += __calc_tbl_byte_sum(control, records, num); - - control->tbl_hdr.checksum = 256 - (control->tbl_byte_sum % 256); -} - -/* table sum mod 256 + checksum must equals 256 */ -static bool __validate_tbl_checksum(struct amdgpu_ras_eeprom_control *control, - struct eeprom_table_record *records, int num) -{ - control->tbl_byte_sum = __calc_tbl_byte_sum(control, records, num); - - if (control->tbl_hdr.checksum + (control->tbl_byte_sum % 256) != 256) { - DRM_WARN("Checksum mismatch, checksum: %u ", control->tbl_hdr.checksum); - return false; - } - - return true; -} - int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *records, bool write, int num) { int i, ret = 0; - struct i2c_msg *msgs; - unsigned char *buffs; + struct i2c_msg *msgs, *msg; + unsigned char *buffs, *buff; + struct eeprom_table_record *record; struct amdgpu_device *adev = to_amdgpu_device(control); - if (adev->asic_type != CHIP_VEGA20) + if (adev->asic_type != CHIP_VEGA20 && adev->asic_type != CHIP_ARCTURUS) return 0; buffs = kcalloc(num, EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE, @@ -373,9 +398,9 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, * 256b */ for (i = 0; i < num; i++) { - unsigned char *buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; - struct eeprom_table_record *record = &records[i]; - struct i2c_msg *msg = &msgs[i]; + buff = &buffs[i * (EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; + record = &records[i]; + msg = &msgs[i]; control->next_addr = __correct_eeprom_dest_address(control->next_addr); @@ -415,8 +440,8 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, if (!write) { for (i = 0; i < num; i++) { - unsigned char *buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; - struct eeprom_table_record *record = &records[i]; + buff = &buffs[i*(EEPROM_ADDRESS_SIZE + EEPROM_TABLE_RECORD_SIZE)]; + record = &records[i]; __decode_table_record_from_buff(control, record, buff + EEPROM_ADDRESS_SIZE); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h index 41f3fcb9a29b..622269957c1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h @@ -79,6 +79,7 @@ struct eeprom_table_record { int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control); void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control); +int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control); int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control, struct eeprom_table_record *records, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c index 5c13c503e61f..6010999d9020 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c @@ -23,6 +23,7 @@ #include "amdgpu.h" #include "amdgpu_sdma.h" +#include "amdgpu_ras.h" #define AMDGPU_CSA_SDMA_SIZE 64 /* SDMA CSA reside in the 3rd page of CSA */ @@ -83,3 +84,101 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, return csa_mc_addr; } + +int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, + void *ras_ih_info) +{ + int r, i; + struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info; + struct ras_fs_if fs_info = { + .sysfs_name = 
"sdma_err_count", + .debugfs_name = "sdma_err_inject", + }; + + if (!ih_info) + return -EINVAL; + + if (!adev->sdma.ras_if) { + adev->sdma.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->sdma.ras_if) + return -ENOMEM; + adev->sdma.ras_if->block = AMDGPU_RAS_BLOCK__SDMA; + adev->sdma.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->sdma.ras_if->sub_block_index = 0; + strcpy(adev->sdma.ras_if->name, "sdma"); + } + fs_info.head = ih_info->head = *adev->sdma.ras_if; + + r = amdgpu_ras_late_init(adev, adev->sdma.ras_if, + &fs_info, ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->sdma.ras_if->block)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, + AMDGPU_SDMA_IRQ_INSTANCE0 + i); + if (r) + goto late_fini; + } + } else { + r = 0; + goto free; + } + + return 0; + +late_fini: + amdgpu_ras_late_fini(adev, adev->sdma.ras_if, ih_info); +free: + kfree(adev->sdma.ras_if); + adev->sdma.ras_if = NULL; + return r; +} + +void amdgpu_sdma_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) && + adev->sdma.ras_if) { + struct ras_common_if *ras_if = adev->sdma.ras_if; + struct ras_ih_if ih_info = { + .head = *ras_if, + /* the cb member will not be used by + * amdgpu_ras_interrupt_remove_handler, init it only + * to cheat the check in ras_late_fini + */ + .cb = amdgpu_sdma_process_ras_data_cb, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} + +int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry) +{ + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + amdgpu_ras_reset_gpu(adev, 0); + + return AMDGPU_RAS_SUCCESS; +} + +int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct ras_common_if *ras_if = adev->sdma.ras_if; + struct ras_dispatch_if ih_data = { + .entry = entry, + }; + + if (!ras_if) + return 0; + + ih_data.head = *ras_if; + + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index a9ae0d8a0589..761ff8be6314 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -104,4 +104,13 @@ struct amdgpu_sdma_instance * amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring); int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index); uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid); +int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev, + void *ras_ih_info); +void amdgpu_sdma_ras_fini(struct amdgpu_device *adev); +int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev, + void *err_data, + struct amdgpu_iv_entry *entry); +int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c index b66d29d5ffa2..b158230af8db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_test.c @@ -138,6 +138,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } dma_fence_put(fence); + fence = NULL; r = amdgpu_bo_kmap(vram_obj, &vram_map); if (r) { @@ -183,6 +184,7 @@ static void amdgpu_do_test_moves(struct amdgpu_device *adev) } dma_fence_put(fence); + fence = NULL; 
r = amdgpu_bo_kmap(gtt_obj[i], >t_map); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 77674a7b9616..63e734a125fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -170,7 +170,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, __field(unsigned int, context) __field(unsigned int, seqno) __field(struct dma_fence *, fence) - __field(char *, ring_name) + __string(ring, to_amdgpu_ring(job->base.sched)->name) __field(u32, num_ibs) ), @@ -179,12 +179,12 @@ TRACE_EVENT(amdgpu_cs_ioctl, __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __entry->ring_name = to_amdgpu_ring(job->base.sched)->name; + __assign_str(ring, to_amdgpu_ring(job->base.sched)->name) __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", __entry->sched_job_id, __get_str(timeline), __entry->context, - __entry->seqno, __entry->ring_name, __entry->num_ibs) + __entry->seqno, __get_str(ring), __entry->num_ibs) ); TRACE_EVENT(amdgpu_sched_run_job, @@ -195,7 +195,7 @@ TRACE_EVENT(amdgpu_sched_run_job, __string(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __field(unsigned int, context) __field(unsigned int, seqno) - __field(char *, ring_name) + __string(ring, to_amdgpu_ring(job->base.sched)->name) __field(u32, num_ibs) ), @@ -204,12 +204,12 @@ TRACE_EVENT(amdgpu_sched_run_job, __assign_str(timeline, AMDGPU_JOB_GET_TIMELINE_NAME(job)) __entry->context = job->base.s_fence->finished.context; __entry->seqno = job->base.s_fence->finished.seqno; - __entry->ring_name = to_amdgpu_ring(job->base.sched)->name; + __assign_str(ring, to_amdgpu_ring(job->base.sched)->name) __entry->num_ibs = job->num_ibs; ), TP_printk("sched_job=%llu, timeline=%s, context=%u, seqno=%u, ring_name=%s, num_ibs=%u", __entry->sched_job_id, __get_str(timeline), __entry->context, - __entry->seqno, __entry->ring_name, __entry->num_ibs) + __entry->seqno, __get_str(ring), __entry->num_ibs) ); @@ -323,14 +323,15 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs, TRACE_EVENT(amdgpu_vm_set_ptes, TP_PROTO(uint64_t pe, uint64_t addr, unsigned count, - uint32_t incr, uint64_t flags), - TP_ARGS(pe, addr, count, incr, flags), + uint32_t incr, uint64_t flags, bool direct), + TP_ARGS(pe, addr, count, incr, flags, direct), TP_STRUCT__entry( __field(u64, pe) __field(u64, addr) __field(u32, count) __field(u32, incr) __field(u64, flags) + __field(bool, direct) ), TP_fast_assign( @@ -339,28 +340,32 @@ TRACE_EVENT(amdgpu_vm_set_ptes, __entry->count = count; __entry->incr = incr; __entry->flags = flags; + __entry->direct = direct; ), - TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u", - __entry->pe, __entry->addr, __entry->incr, - __entry->flags, __entry->count) + TP_printk("pe=%010Lx, addr=%010Lx, incr=%u, flags=%llx, count=%u, " + "direct=%d", __entry->pe, __entry->addr, __entry->incr, + __entry->flags, __entry->count, __entry->direct) ); TRACE_EVENT(amdgpu_vm_copy_ptes, - TP_PROTO(uint64_t pe, uint64_t src, unsigned count), - TP_ARGS(pe, src, count), + TP_PROTO(uint64_t pe, uint64_t src, unsigned count, bool direct), + TP_ARGS(pe, src, count, direct), TP_STRUCT__entry( __field(u64, pe) __field(u64, src) __field(u32, count) + __field(bool, direct) ), TP_fast_assign( __entry->pe = pe; __entry->src = src; __entry->count = count; + __entry->direct = direct; ), - TP_printk("pe=%010Lx, 
src=%010Lx, count=%u", - __entry->pe, __entry->src, __entry->count) + TP_printk("pe=%010Lx, src=%010Lx, count=%u, direct=%d", + __entry->pe, __entry->src, __entry->count, + __entry->direct) ); TRACE_EVENT(amdgpu_vm_flush, @@ -468,7 +473,7 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, TP_PROTO(struct amdgpu_job *sched_job, struct dma_fence *fence), TP_ARGS(sched_job, fence), TP_STRUCT__entry( - __field(const char *,name) + __string(ring, sched_job->base.sched->name) __field(uint64_t, id) __field(struct dma_fence *, fence) __field(uint64_t, ctx) @@ -476,14 +481,14 @@ TRACE_EVENT(amdgpu_ib_pipe_sync, ), TP_fast_assign( - __entry->name = sched_job->base.sched->name; + __assign_str(ring, sched_job->base.sched->name) __entry->id = sched_job->base.id; __entry->fence = fence; __entry->ctx = fence->context; __entry->seqno = fence->seqno; ), TP_printk("job ring=%s, id=%llu, need pipe sync to fence=%p, context=%llu, seq=%u", - __entry->name, __entry->id, + __get_str(ring), __entry->id, __entry->fence, __entry->ctx, __entry->seqno) ); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dff41d0a85fe..61d9b7774d42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -39,6 +39,7 @@ #include <linux/slab.h> #include <linux/swap.h> #include <linux/swiotlb.h> +#include <linux/dma-buf.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> @@ -54,6 +55,7 @@ #include "amdgpu_trace.h" #include "amdgpu_amdkfd.h" #include "amdgpu_sdma.h" +#include "amdgpu_ras.h" #include "bif/bif_4_1_d.h" static int amdgpu_map_buffer(struct ttm_buffer_object *bo, @@ -484,15 +486,12 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) { - struct amdgpu_device *adev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; struct ttm_place placements; struct ttm_placement placement; int r; - adev = amdgpu_ttm_adev(bo->bdev); - /* create space/pages for new_mem in GTT space */ tmp_mem = *new_mem; tmp_mem.mm_node = NULL; @@ -543,15 +542,12 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) { - struct amdgpu_device *adev; struct ttm_mem_reg *old_mem = &bo->mem; struct ttm_mem_reg tmp_mem; struct ttm_placement placement; struct ttm_place placements; int r; - adev = amdgpu_ttm_adev(bo->bdev); - /* make space in GTT for old_mem buffer */ tmp_mem = *new_mem; tmp_mem.mm_node = NULL; @@ -763,6 +759,7 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, */ struct amdgpu_ttm_tt { struct ttm_dma_tt ttm; + struct drm_gem_object *gobj; u64 offset; uint64_t userptr; struct task_struct *usertask; @@ -1217,16 +1214,14 @@ static struct ttm_backend_func amdgpu_backend_func = { static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { - struct amdgpu_device *adev; struct amdgpu_ttm_tt *gtt; - adev = amdgpu_ttm_adev(bo->bdev); - gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); if (gtt == NULL) { return NULL; } gtt->ttm.ttm.func = &amdgpu_backend_func; + gtt->gobj = &bo->base; /* allocate space for the uninitialized page entries */ if (ttm_sg_tt_init(>t->ttm, bo, page_flags)) { @@ -1247,7 +1242,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, { struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; - bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); /* 
user pages are bound by amdgpu_ttm_tt_pin_userptr() */ if (gtt && gtt->userptr) { @@ -1260,7 +1254,19 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, return 0; } - if (slave && ttm->sg) { + if (ttm->page_flags & TTM_PAGE_FLAG_SG) { + if (!ttm->sg) { + struct dma_buf_attachment *attach; + struct sg_table *sgt; + + attach = gtt->gobj->import_attach; + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + ttm->sg = sgt; + } + drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); @@ -1287,9 +1293,8 @@ static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm, */ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) { - struct amdgpu_device *adev; struct amdgpu_ttm_tt *gtt = (void *)ttm; - bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); + struct amdgpu_device *adev; if (gtt && gtt->userptr) { amdgpu_ttm_tt_set_user_pages(ttm, NULL); @@ -1298,7 +1303,16 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm) return; } - if (slave) + if (ttm->sg && gtt->gobj->import_attach) { + struct dma_buf_attachment *attach; + + attach = gtt->gobj->import_attach; + dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); + ttm->sg = NULL; + return; + } + + if (ttm->page_flags & TTM_PAGE_FLAG_SG) return; adev = amdgpu_ttm_adev(ttm->bdev); @@ -1634,81 +1648,105 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) */ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) { - struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_bo_param bp; - int r = 0; - int i; - u64 vram_size = adev->gmc.visible_vram_size; - u64 offset = adev->fw_vram_usage.start_offset; - u64 size = adev->fw_vram_usage.size; - struct amdgpu_bo *bo; - - memset(&bp, 0, sizeof(bp)); - bp.size = adev->fw_vram_usage.size; - bp.byte_align = PAGE_SIZE; - bp.domain = AMDGPU_GEM_DOMAIN_VRAM; - bp.flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED | - AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS; - bp.type = ttm_bo_type_kernel; - bp.resv = NULL; + uint64_t vram_size = adev->gmc.visible_vram_size; + adev->fw_vram_usage.va = NULL; adev->fw_vram_usage.reserved_bo = NULL; - if (adev->fw_vram_usage.size > 0 && - adev->fw_vram_usage.size <= vram_size) { + if (adev->fw_vram_usage.size == 0 || + adev->fw_vram_usage.size > vram_size) + return 0; - r = amdgpu_bo_create(adev, &bp, - &adev->fw_vram_usage.reserved_bo); - if (r) - goto error_create; + return amdgpu_bo_create_kernel_at(adev, + adev->fw_vram_usage.start_offset, + adev->fw_vram_usage.size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->fw_vram_usage.reserved_bo, + &adev->fw_vram_usage.va); +} - r = amdgpu_bo_reserve(adev->fw_vram_usage.reserved_bo, false); - if (r) - goto error_reserve; +/* + * Memoy training reservation functions + */ - /* remove the original mem node and create a new one at the - * request position - */ - bo = adev->fw_vram_usage.reserved_bo; - offset = ALIGN(offset, PAGE_SIZE); - for (i = 0; i < bo->placement.num_placement; ++i) { - bo->placements[i].fpfn = offset >> PAGE_SHIFT; - bo->placements[i].lpfn = (offset + size) >> PAGE_SHIFT; - } +/** + * amdgpu_ttm_training_reserve_vram_fini - free memory training reserved vram + * + * @adev: amdgpu_device pointer + * + * free memory training reserved vram if it has been reserved. 
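The dma-buf import path above relies on the kernel's error-pointer convention: dma_buf_map_attachment() returns either a valid sg_table pointer or a negative errno encoded in the pointer value, distinguished with IS_ERR()/PTR_ERR(). A simplified userspace re-creation of that encoding, only to show how one return value can carry both cases; MAX_ERRNO and the helpers are re-implemented here for the example, not taken from a kernel header:

.. code-block:: c

    /* Minimal re-creation of the ERR_PTR/IS_ERR/PTR_ERR idiom: errno values
     * live in the top 4095 bytes of the address space, which no valid
     * allocation will occupy. */
    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static void *err_ptr(long error)      { return (void *)error; }
    static int   is_err(const void *ptr)  { return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO; }
    static long  ptr_err(const void *ptr) { return (long)ptr; }

    static void *map_table(int fail)
    {
        if (fail)
            return err_ptr(-ENOMEM);
        return malloc(32);
    }

    int main(void)
    {
        void *sgt = map_table(1);

        if (is_err(sgt)) {
            printf("mapping failed: %ld\n", ptr_err(sgt));
            return 1;
        }
        free(sgt);
        return 0;
    }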
+ */ +static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev) +{ + struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; - ttm_bo_mem_put(&bo->tbo, &bo->tbo.mem); - r = ttm_bo_mem_space(&bo->tbo, &bo->placement, - &bo->tbo.mem, &ctx); - if (r) - goto error_pin; + ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; + amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL); + ctx->c2p_bo = NULL; - r = amdgpu_bo_pin_restricted(adev->fw_vram_usage.reserved_bo, - AMDGPU_GEM_DOMAIN_VRAM, - adev->fw_vram_usage.start_offset, - (adev->fw_vram_usage.start_offset + - adev->fw_vram_usage.size)); - if (r) - goto error_pin; - r = amdgpu_bo_kmap(adev->fw_vram_usage.reserved_bo, - &adev->fw_vram_usage.va); - if (r) - goto error_kmap; + amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL); + ctx->p2c_bo = NULL; - amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); + return 0; +} + +/** + * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training + * + * @adev: amdgpu_device pointer + * + * create bo vram reservation from memory training. + */ +static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev) +{ + int ret; + struct psp_memory_training_context *ctx = &adev->psp.mem_train_ctx; + + memset(ctx, 0, sizeof(*ctx)); + if (!adev->fw_vram_usage.mem_train_support) { + DRM_DEBUG("memory training does not support!\n"); + return 0; } - return r; -error_kmap: - amdgpu_bo_unpin(adev->fw_vram_usage.reserved_bo); -error_pin: - amdgpu_bo_unreserve(adev->fw_vram_usage.reserved_bo); -error_reserve: - amdgpu_bo_unref(&adev->fw_vram_usage.reserved_bo); -error_create: - adev->fw_vram_usage.va = NULL; - adev->fw_vram_usage.reserved_bo = NULL; - return r; + ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc; + ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET); + ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES; + + DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", + ctx->train_data_size, + ctx->p2c_train_data_offset, + ctx->c2p_train_data_offset); + + ret = amdgpu_bo_create_kernel_at(adev, + ctx->p2c_train_data_offset, + ctx->train_data_size, + AMDGPU_GEM_DOMAIN_VRAM, + &ctx->p2c_bo, + NULL); + if (ret) { + DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret); + goto Err_out; + } + + ret = amdgpu_bo_create_kernel_at(adev, + ctx->c2p_train_data_offset, + ctx->train_data_size, + AMDGPU_GEM_DOMAIN_VRAM, + &ctx->c2p_bo, + NULL); + if (ret) { + DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret); + goto Err_out; + } + + ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS; + return 0; + +Err_out: + amdgpu_ttm_training_reserve_vram_fini(adev); + return ret; } + /** * amdgpu_ttm_init - Init the memory management (ttm) as well as various * gtt/vram related fields. @@ -1731,6 +1769,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->ddev->anon_inode->i_mapping, + adev->ddev->vma_offset_manager, dma_addressing_limited(adev->dev)); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); @@ -1771,6 +1810,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } + /* + *The reserved vram for memory training must be pinned to the specified + *place on the VRAM, so reserve it early. 
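Using the constants added to amdgpu_psp.h earlier in this series (GDDR6_MEM_TRAINING_OFFSET = 0x8000 and GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES = 0x1000), the p2c training buffer always sits just below the top of VRAM, while the c2p buffer location comes from the firmware-reported mem_train_fb_loc. A tiny arithmetic check of that layout; the VRAM size and fb_loc values are made up for the example:

.. code-block:: c

    /* Where the two memory-training buffers land in VRAM, following the
     * offsets computed in amdgpu_ttm_training_reserve_vram_init(). */
    #include <stdint.h>
    #include <stdio.h>

    #define GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES 0x1000
    #define GDDR6_MEM_TRAINING_OFFSET             0x8000

    int main(void)
    {
        uint64_t mc_vram_size = 8ULL << 30;      /* example: 8 GiB board */
        uint64_t mem_train_fb_loc = 0x1000000;   /* example firmware value */

        uint64_t p2c = mc_vram_size - GDDR6_MEM_TRAINING_OFFSET;
        uint64_t c2p = mem_train_fb_loc;
        uint64_t size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;

        printf("train_data_size:%llx, p2c_train_data_offset:%llx, c2p_train_data_offset:%llx\n",
               (unsigned long long)size,
               (unsigned long long)p2c,
               (unsigned long long)c2p);
        return 0;
    }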
+ */ + r = amdgpu_ttm_training_reserve_vram_init(adev); + if (r) + return r; + /* allocate memory as required for VGA * This is used for VGA emulation and pre-OS scanout buffers to * avoid display artifacts while transitioning between pre-OS @@ -1781,6 +1828,20 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) NULL, &stolen_vga_buf); if (r) return r; + + /* + * reserve one TMR (64K) memory at the top of VRAM which holds + * IP Discovery data and is protected by PSP. + */ + r = amdgpu_bo_create_kernel_at(adev, + adev->gmc.real_vram_size - DISCOVERY_TMR_SIZE, + DISCOVERY_TMR_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->discovery_memory, + NULL); + if (r) + return r; + DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); @@ -1856,7 +1917,11 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) return; amdgpu_ttm_debugfs_fini(adev); + amdgpu_ttm_training_reserve_vram_fini(adev); + /* return the IP Discovery TMR memory back to VRAM */ + amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL); amdgpu_ttm_fw_reserve_vram_fini(adev); + if (adev->mman.aper_base_kaddr) iounmap(adev->mman.aper_base_kaddr); adev->mman.aper_base_kaddr = NULL; @@ -1952,10 +2017,7 @@ static int amdgpu_map_buffer(struct ttm_buffer_object *bo, *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE; - num_dw = adev->mman.buffer_funcs->copy_num_dw; - while (num_dw & 0x7) - num_dw++; - + num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); num_bytes = num_pages * 8; r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, &job); @@ -2015,11 +2077,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, max_bytes = adev->mman.buffer_funcs->copy_max_bytes; num_loops = DIV_ROUND_UP(byte_count, max_bytes); - num_dw = num_loops * adev->mman.buffer_funcs->copy_num_dw; - - /* for IB padding */ - while (num_dw & 0x7) - num_dw++; + num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index 3a6115ad0196..833fc4b68940 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -360,6 +360,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) case CHIP_RAVEN: case CHIP_VEGA12: case CHIP_VEGA20: + case CHIP_ARCTURUS: case CHIP_RENOIR: case CHIP_NAVI10: case CHIP_NAVI14: @@ -368,8 +369,6 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type) return AMDGPU_FW_LOAD_DIRECT; else return AMDGPU_FW_LOAD_PSP; - case CHIP_ARCTURUS: - return AMDGPU_FW_LOAD_DIRECT; default: DRM_ERROR("Unknown firmware load type\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index b34f00d42049..410587b950f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -108,6 +108,12 @@ struct ta_firmware_header_v1_0 { uint32_t ta_ras_ucode_version; uint32_t ta_ras_offset_bytes; uint32_t ta_ras_size_bytes; + uint32_t ta_hdcp_ucode_version; + uint32_t ta_hdcp_offset_bytes; + uint32_t ta_hdcp_size_bytes; + uint32_t ta_dtm_ucode_version; + uint32_t ta_dtm_offset_bytes; + uint32_t ta_dtm_size_bytes; }; /* version_major=1, version_minor=0 */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c new file mode 100644 index 000000000000..d4fb9cf27e21 --- /dev/null +++ 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -0,0 +1,158 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu_ras.h" + +int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_fs_if fs_info = { + .sysfs_name = "umc_err_count", + .debugfs_name = "umc_err_inject", + }; + struct ras_ih_if ih_info = { + .cb = amdgpu_umc_process_ras_data_cb, + }; + + if (!adev->umc.ras_if) { + adev->umc.ras_if = + kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->umc.ras_if) + return -ENOMEM; + adev->umc.ras_if->block = AMDGPU_RAS_BLOCK__UMC; + adev->umc.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->umc.ras_if->sub_block_index = 0; + strcpy(adev->umc.ras_if->name, "umc"); + } + ih_info.head = fs_info.head = *adev->umc.ras_if; + + r = amdgpu_ras_late_init(adev, adev->umc.ras_if, + &fs_info, &ih_info); + if (r) + goto free; + + if (amdgpu_ras_is_supported(adev, adev->umc.ras_if->block)) { + r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); + if (r) + goto late_fini; + } else { + r = 0; + goto free; + } + + /* ras init of specific umc version */ + if (adev->umc.funcs && adev->umc.funcs->err_cnt_init) + adev->umc.funcs->err_cnt_init(adev); + + return 0; + +late_fini: + amdgpu_ras_late_fini(adev, adev->umc.ras_if, &ih_info); +free: + kfree(adev->umc.ras_if); + adev->umc.ras_if = NULL; + return r; +} + +void amdgpu_umc_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) && + adev->umc.ras_if) { + struct ras_common_if *ras_if = adev->umc.ras_if; + struct ras_ih_if ih_info = { + .head = *ras_if, + .cb = amdgpu_umc_process_ras_data_cb, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} + +int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, + void *ras_error_status, + struct amdgpu_iv_entry *entry) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + /* When “Full RAS” is enabled, the per-IP interrupt sources should + * be disabled and the driver should only look for the aggregated + * interrupt via sync flood + */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return AMDGPU_RAS_SUCCESS; + + kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + if (adev->umc.funcs && + adev->umc.funcs->query_ras_error_count) + adev->umc.funcs->query_ras_error_count(adev, ras_error_status); + + if (adev->umc.funcs && + adev->umc.funcs->query_ras_error_address 
&& + adev->umc.max_ras_err_cnt_per_query) { + err_data->err_addr = + kcalloc(adev->umc.max_ras_err_cnt_per_query, + sizeof(struct eeprom_table_record), GFP_KERNEL); + /* still call query_ras_error_address to clear error status + * even NOMEM error is encountered + */ + if(!err_data->err_addr) + DRM_WARN("Failed to alloc memory for umc error address record!\n"); + + /* umc query_ras_error_address is also responsible for clearing + * error status + */ + adev->umc.funcs->query_ras_error_address(adev, ras_error_status); + } + + /* only uncorrectable error needs gpu reset */ + if (err_data->ue_count) { + if (err_data->err_addr_cnt && + amdgpu_ras_add_bad_pages(adev, err_data->err_addr, + err_data->err_addr_cnt)) + DRM_WARN("Failed to add ras bad page!\n"); + + amdgpu_ras_reset_gpu(adev, 0); + } + + kfree(err_data->err_addr); + return AMDGPU_RAS_SUCCESS; +} + +int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + struct ras_common_if *ras_if = adev->umc.ras_if; + struct ras_dispatch_if ih_data = { + .entry = entry, + }; + + if (!ras_if) + return 0; + + ih_data.head = *ras_if; + + amdgpu_ras_interrupt_dispatch(adev, &ih_data); + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 975afa04df09..3283032a78e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -54,7 +54,8 @@ adev->umc.funcs->disable_umc_index_mode(adev); struct amdgpu_umc_funcs { - void (*ras_init)(struct amdgpu_device *adev); + void (*err_cnt_init)(struct amdgpu_device *adev); + int (*ras_late_init)(struct amdgpu_device *adev); void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); void (*query_ras_error_address)(struct amdgpu_device *adev, @@ -62,6 +63,7 @@ struct amdgpu_umc_funcs { void (*enable_umc_index_mode)(struct amdgpu_device *adev, uint32_t umc_instance); void (*disable_umc_index_mode)(struct amdgpu_device *adev); + void (*init_registers)(struct amdgpu_device *adev); }; struct amdgpu_umc { @@ -75,8 +77,17 @@ struct amdgpu_umc { uint32_t channel_offs; /* channel index table of interleaved memory */ const uint32_t *channel_idx_tbl; + struct ras_common_if *ras_if; const struct amdgpu_umc_funcs *funcs; }; +int amdgpu_umc_ras_late_init(struct amdgpu_device *adev); +void amdgpu_umc_ras_fini(struct amdgpu_device *adev); +int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, + void *ras_error_status, + struct amdgpu_iv_entry *entry); +int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index b2c364b8695f..e324bfe6c58f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -39,6 +39,8 @@ #include "cikd.h" #include "uvd/uvd_4_2_d.h" +#include "amdgpu_ras.h" + /* 1 second timeout */ #define UVD_IDLE_TIMEOUT msecs_to_jiffies(1000) @@ -297,6 +299,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { int i, j; + cancel_delayed_work_sync(&adev->uvd.idle_work); drm_sched_entity_destroy(&adev->uvd.entity); for (j = 0; j < adev->uvd.num_uvd_inst; ++j) { @@ -372,7 +375,13 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev) if (!adev->uvd.inst[j].saved_bo) return -ENOMEM; - memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); + /* re-write 0 since err_event_athub will corrupt VCPU buffer */ + if 
(amdgpu_ras_intr_triggered()) { + DRM_WARN("UVD VCPU state may lost due to RAS ERREVENT_ATHUB_INTERRUPT\n"); + memset(adev->uvd.inst[j].saved_bo, 0, size); + } else { + memcpy_fromio(adev->uvd.inst[j].saved_bo, ptr, size); + } } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 65044b1b3d4c..46b590af2fd2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -80,6 +80,11 @@ MODULE_FIRMWARE(FIRMWARE_VEGA12); MODULE_FIRMWARE(FIRMWARE_VEGA20); static void amdgpu_vce_idle_work_handler(struct work_struct *work); +static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, + struct amdgpu_bo *bo, + struct dma_fence **fence); +static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, + bool direct, struct dma_fence **fence); /** * amdgpu_vce_init - allocate memory, load vce firmware @@ -211,6 +216,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev) if (adev->vce.vcpu_bo == NULL) return 0; + cancel_delayed_work_sync(&adev->vce.idle_work); drm_sched_entity_destroy(&adev->vce.entity); amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr, @@ -428,9 +434,9 @@ void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp) * * Open up a stream for HW test */ -int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, - struct dma_fence **fence) +static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, + struct amdgpu_bo *bo, + struct dma_fence **fence) { const unsigned ib_size_dw = 1024; struct amdgpu_job *job; @@ -508,8 +514,8 @@ err: * * Close up a stream for HW test or if userspace failed to do so */ -int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct dma_fence **fence) +static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, + bool direct, struct dma_fence **fence) { const unsigned ib_size_dw = 1024; struct amdgpu_job *job; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h index e802f7d9db0a..d6d83a3ec803 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h @@ -58,11 +58,6 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev); int amdgpu_vce_entity_init(struct amdgpu_device *adev); int amdgpu_vce_suspend(struct amdgpu_device *adev); int amdgpu_vce_resume(struct amdgpu_device *adev); -int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, - struct amdgpu_bo *bo, - struct dma_fence **fence); -int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, - bool direct, struct dma_fence **fence); void amdgpu_vce_free_handles(struct amdgpu_device *adev, struct drm_file *filp); int amdgpu_vce_ring_parse_cs(struct amdgpu_cs_parser *p, uint32_t ib_idx); int amdgpu_vce_ring_parse_cs_vm(struct amdgpu_cs_parser *p, uint32_t ib_idx); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 3199e4a5ff12..9d870444d7d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -193,6 +193,8 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev) { int i, j; + cancel_delayed_work_sync(&adev->vcn.idle_work); + if (adev->vcn.indirect_sram) { amdgpu_bo_free_kernel(&adev->vcn.dpg_sram_bo, &adev->vcn.dpg_sram_gpu_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c 
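Note on the preceding UVD/VCE/VCN hunks: the new cancel_delayed_work_sync() calls in the sw_fini paths make sure a queued idle handler can no longer run once the buffers and scheduler entities it touches are torn down. A minimal sketch of that teardown ordering (not part of the patch; my_dev, my_idle_handler and my_dev_fini are illustrative names, not amdgpu code):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct my_dev {
	struct delayed_work idle_work;
	void *vcpu_buf;		/* data the idle handler dereferences */
};

static void my_idle_handler(struct work_struct *work)
{
	struct my_dev *mdev = container_of(work, struct my_dev, idle_work.work);

	/* only safe while mdev->vcpu_buf is still allocated */
	(void)mdev->vcpu_buf;
}

static void my_dev_init(struct my_dev *mdev)
{
	INIT_DELAYED_WORK(&mdev->idle_work, my_idle_handler);
	schedule_delayed_work(&mdev->idle_work, msecs_to_jiffies(1000));
}

static void my_dev_fini(struct my_dev *mdev)
{
	/* wait for a possibly running handler before freeing what it uses */
	cancel_delayed_work_sync(&mdev->idle_work);
	kfree(mdev->vcpu_buf);
	mdev->vcpu_buf = NULL;
}

The same ordering is why the calls sit at the top of amdgpu_uvd_sw_fini(), amdgpu_vce_sw_fini() and amdgpu_vcn_sw_fini(), before the scheduler entities and firmware BOs are released.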
index 5251352f5922..598c24505c73 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -130,7 +130,8 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev, if (level == adev->vm_manager.root_level) /* For the root directory */ - return round_up(adev->vm_manager.max_pfn, 1ULL << shift) >> shift; + return round_up(adev->vm_manager.max_pfn, 1ULL << shift) + >> shift; else if (level != AMDGPU_VM_PTB) /* Everything in between */ return 512; @@ -341,7 +342,7 @@ static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt) return container_of(parent->vm_bo, struct amdgpu_vm_pt, base); } -/** +/* * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt */ struct amdgpu_vm_pt_cursor { @@ -482,6 +483,7 @@ static void amdgpu_vm_pt_next(struct amdgpu_device *adev, * * @adev: amdgpu_device structure * @vm: amdgpu_vm structure + * @start: optional cursor to start with * @cursor: state to initialize * * Starts a deep first traversal of the PD/PT tree. @@ -535,7 +537,7 @@ static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev, amdgpu_vm_pt_ancestor(cursor); } -/** +/* * for_each_amdgpu_vm_pt_dfs_safe - safe deep first search of all PDs/PTs */ #define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, start, cursor, entry) \ @@ -566,6 +568,14 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, list_add(&entry->tv.head, validated); } +/** + * amdgpu_vm_del_from_lru_notify - update bulk_moveable flag + * + * @bo: BO which was removed from the LRU + * + * Make sure the bulk_moveable flag is updated when a BO is removed from the + * LRU. + */ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) { struct amdgpu_bo *abo; @@ -600,19 +610,18 @@ void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo) void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm) { - struct ttm_bo_global *glob = adev->mman.bdev.glob; struct amdgpu_vm_bo_base *bo_base; if (vm->bulk_moveable) { - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); return; } memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move)); - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); list_for_each_entry(bo_base, &vm->idle, vm_status) { struct amdgpu_bo *bo = bo_base->bo; @@ -624,7 +633,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, ttm_bo_move_to_lru_tail(&bo->shadow->tbo, &vm->lru_bulk_move); } - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); vm->bulk_moveable = true; } @@ -693,6 +702,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) * @adev: amdgpu_device pointer * @vm: VM to clear BO from * @bo: BO to clear + * @direct: use a direct update * * Root PD needs to be reserved when calling this. 
* @@ -701,7 +711,8 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm) */ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_bo *bo) + struct amdgpu_bo *bo, + bool direct) { struct ttm_operation_ctx ctx = { true, false }; unsigned level = adev->vm_manager.root_level; @@ -760,6 +771,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; params.vm = vm; + params.direct = direct; r = vm->update_funcs->prepare(¶ms, AMDGPU_FENCE_OWNER_KFD, NULL); if (r) @@ -813,10 +825,13 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @vm: requesting vm + * @level: the page table level + * @direct: use a direct update * @bp: resulting BO allocation parameters */ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, - int level, struct amdgpu_bo_param *bp) + int level, bool direct, + struct amdgpu_bo_param *bp) { memset(bp, 0, sizeof(*bp)); @@ -831,6 +846,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, else if (!vm->root.base.bo || vm->root.base.bo->shadow) bp->flags |= AMDGPU_GEM_CREATE_SHADOW; bp->type = ttm_bo_type_kernel; + bp->no_wait_gpu = direct; if (vm->root.base.bo) bp->resv = vm->root.base.bo->tbo.base.resv; } @@ -841,6 +857,7 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, * @adev: amdgpu_device pointer * @vm: VM to allocate page tables for * @cursor: Which page table to allocate + * @direct: use a direct update * * Make sure a specific page table or directory is allocated. * @@ -850,7 +867,8 @@ static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm, */ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, struct amdgpu_vm *vm, - struct amdgpu_vm_pt_cursor *cursor) + struct amdgpu_vm_pt_cursor *cursor, + bool direct) { struct amdgpu_vm_pt *entry = cursor->entry; struct amdgpu_bo_param bp; @@ -871,7 +889,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, if (entry->base.bo) return 0; - amdgpu_vm_bo_param(adev, vm, cursor->level, &bp); + amdgpu_vm_bo_param(adev, vm, cursor->level, direct, &bp); r = amdgpu_bo_create(adev, &bp, &pt); if (r) @@ -883,7 +901,7 @@ static int amdgpu_vm_alloc_pts(struct amdgpu_device *adev, pt->parent = amdgpu_bo_ref(cursor->parent->base.bo); amdgpu_vm_bo_base_init(&entry->base, vm, pt); - r = amdgpu_vm_clear_bo(adev, vm, pt); + r = amdgpu_vm_clear_bo(adev, vm, pt, direct); if (r) goto error_free_pt; @@ -1020,7 +1038,8 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, * Returns: * 0 on success, errno otherwise. 
*/ -int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync) +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, + bool need_pipe_sync) { struct amdgpu_device *adev = ring->adev; unsigned vmhub = ring->funcs->vmhub; @@ -1034,10 +1053,8 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ id->oa_base != job->oa_base || id->oa_size != job->oa_size); bool vm_flush_needed = job->vm_needs_flush; - bool pasid_mapping_needed = id->pasid != job->pasid || - !id->pasid_mapping || - !dma_fence_is_signaled(id->pasid_mapping); struct dma_fence *fence = NULL; + bool pasid_mapping_needed = false; unsigned patch_offset = 0; int r; @@ -1047,6 +1064,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ pasid_mapping_needed = true; } + mutex_lock(&id_mgr->lock); + if (id->pasid != job->pasid || !id->pasid_mapping || + !dma_fence_is_signaled(id->pasid_mapping)) + pasid_mapping_needed = true; + mutex_unlock(&id_mgr->lock); + gds_switch_needed &= !!ring->funcs->emit_gds_switch; vm_flush_needed &= !!ring->funcs->emit_vm_flush && job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET; @@ -1086,9 +1109,11 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_ } if (pasid_mapping_needed) { + mutex_lock(&id_mgr->lock); id->pasid = job->pasid; dma_fence_put(id->pasid_mapping); id->pasid_mapping = dma_fence_get(fence); + mutex_unlock(&id_mgr->lock); } dma_fence_put(fence); @@ -1172,10 +1197,10 @@ uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr) return result; } -/* +/** * amdgpu_vm_update_pde - update a single level in the hierarchy * - * @param: parameters for the update + * @params: parameters for the update * @vm: requested vm * @entry: entry to update * @@ -1199,7 +1224,7 @@ static int amdgpu_vm_update_pde(struct amdgpu_vm_update_params *params, return vm->update_funcs->update(params, bo, pde, pt, 1, 0, flags); } -/* +/** * amdgpu_vm_invalidate_pds - mark all PDs as invalid * * @adev: amdgpu_device pointer @@ -1218,19 +1243,20 @@ static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev, amdgpu_vm_bo_relocated(&entry->base); } -/* - * amdgpu_vm_update_directories - make sure that all directories are valid +/** + * amdgpu_vm_update_pdes - make sure that all directories are valid * * @adev: amdgpu_device pointer * @vm: requested vm + * @direct: submit directly to the paging queue * * Makes sure all directories are up to date. * * Returns: * 0 for success, error for failure. */ -int amdgpu_vm_update_directories(struct amdgpu_device *adev, - struct amdgpu_vm *vm) +int amdgpu_vm_update_pdes(struct amdgpu_device *adev, + struct amdgpu_vm *vm, bool direct) { struct amdgpu_vm_update_params params; int r; @@ -1241,6 +1267,7 @@ int amdgpu_vm_update_directories(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; params.vm = vm; + params.direct = direct; r = vm->update_funcs->prepare(¶ms, AMDGPU_FENCE_OWNER_VM, NULL); if (r) @@ -1268,7 +1295,7 @@ error: return r; } -/** +/* * amdgpu_vm_update_flags - figure out flags for PTE updates * * Make sure to set the right flags for the PTEs at the desired level. 
@@ -1391,7 +1418,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, uint64_t incr, entry_end, pe_start; struct amdgpu_bo *pt; - r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor); + /* make sure that the page tables covering the address range are + * actually allocated + */ + r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor, + params->direct); if (r) return r; @@ -1463,7 +1494,12 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, } while (frag_start < entry_end); if (amdgpu_vm_pt_descendant(adev, &cursor)) { - /* Free all child entries */ + /* Free all child entries. + * Update the tables with the flags and addresses and free up subsequent + * tables in the case of huge pages or freed up areas. + * This is the maximum you can free, because all other page tables are not + * completely covered by the range and so potentially still in use. + */ while (cursor.pfn < frag_start) { amdgpu_vm_free_pts(adev, params->vm, &cursor); amdgpu_vm_pt_next(adev, &cursor); @@ -1482,13 +1518,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * * @adev: amdgpu_device pointer - * @exclusive: fence we need to sync to - * @pages_addr: DMA addresses to use for mapping * @vm: requested vm + * @direct: direct submission in a page fault + * @exclusive: fence we need to sync to * @start: start of mapped range * @last: last mapped entry * @flags: flags for the entries * @addr: addr to set the area to + * @pages_addr: DMA addresses to use for mapping * @fence: optional resulting fence * * Fill in the page table entries between @start and @last. @@ -1497,11 +1534,11 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params, * 0 for success, -EINVAL for failure. 
*/ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, + struct amdgpu_vm *vm, bool direct, struct dma_fence *exclusive, - dma_addr_t *pages_addr, - struct amdgpu_vm *vm, uint64_t start, uint64_t last, uint64_t flags, uint64_t addr, + dma_addr_t *pages_addr, struct dma_fence **fence) { struct amdgpu_vm_update_params params; @@ -1511,6 +1548,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, memset(¶ms, 0, sizeof(params)); params.adev = adev; params.vm = vm; + params.direct = direct; params.pages_addr = pages_addr; /* sync to everything except eviction fences on unmapping */ @@ -1569,27 +1607,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, if (!(mapping->flags & AMDGPU_PTE_WRITEABLE)) flags &= ~AMDGPU_PTE_WRITEABLE; - flags &= ~AMDGPU_PTE_EXECUTABLE; - flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; - - if (adev->asic_type >= CHIP_NAVI10) { - flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; - flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); - } else { - flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; - flags |= (mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK); - } - - if ((mapping->flags & AMDGPU_PTE_PRT) && - (adev->asic_type >= CHIP_VEGA10)) { - flags |= AMDGPU_PTE_PRT; - if (adev->asic_type >= CHIP_NAVI10) { - flags |= AMDGPU_PTE_SNOOPED; - flags |= AMDGPU_PTE_LOG; - flags |= AMDGPU_PTE_SYSTEM; - } - flags &= ~AMDGPU_PTE_VALID; - } + /* Apply ASIC specific mapping flags */ + amdgpu_gmc_get_vm_pte(adev, mapping, &flags); trace_amdgpu_vm_bo_update(mapping); @@ -1633,7 +1652,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, dma_addr = pages_addr; } else { addr = pages_addr[pfn]; - max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE; + max_entries = count * + AMDGPU_GPU_PAGES_IN_CPU_PAGE; } } else if (flags & AMDGPU_PTE_VALID) { @@ -1642,9 +1662,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, } last = min((uint64_t)mapping->last, start + max_entries - 1); - r = amdgpu_vm_bo_update_mapping(adev, exclusive, dma_addr, vm, + r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive, start, last, flags, addr, - fence); + dma_addr, fence); if (r) return r; @@ -1672,8 +1692,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, * Returns: * 0 for success, -EINVAL for failure. */ -int amdgpu_vm_bo_update(struct amdgpu_device *adev, - struct amdgpu_bo_va *bo_va, +int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, bool clear) { struct amdgpu_bo *bo = bo_va->base.bo; @@ -1700,7 +1719,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); pages_addr = ttm->dma_address; } - exclusive = dma_resv_get_excl(bo->tbo.base.resv); + exclusive = bo->tbo.moving; } if (bo) { @@ -1731,12 +1750,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, return r; } - if (vm->use_cpu_for_update) { - /* Flush HDP */ - mb(); - amdgpu_asic_flush_hdp(adev, NULL); - } - /* If the BO is not in its preferred location add it back to * the evicted list so that it gets validated again on the * next command submission. 
@@ -1744,7 +1757,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) { uint32_t mem_type = bo->tbo.mem.mem_type; - if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type))) + if (!(bo->preferred_domains & + amdgpu_mem_type_to_domain(mem_type))) amdgpu_vm_bo_evicted(&bo_va->base); else amdgpu_vm_bo_idle(&bo_va->base); @@ -1938,9 +1952,9 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, mapping->start < AMDGPU_GMC_HOLE_START) init_pte_value = AMDGPU_PTE_DEFAULT_ATC; - r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm, + r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL, mapping->start, mapping->last, - init_pte_value, 0, &f); + init_pte_value, 0, NULL, &f); amdgpu_vm_free_mapping(adev, vm, mapping, f); if (r) { dma_fence_put(f); @@ -2682,12 +2696,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, spin_lock_init(&vm->invalidated_lock); INIT_LIST_HEAD(&vm->freed); - /* create scheduler entity for page table updates */ - r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs, + /* create scheduler entities for page table updates */ + r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs, adev->vm_manager.vm_pte_num_rqs, NULL); if (r) return r; + r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs, + adev->vm_manager.vm_pte_num_rqs, NULL); + if (r) + goto error_free_direct; + vm->pte_support_ats = false; if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) { @@ -2702,7 +2721,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, } DRM_DEBUG_DRIVER("VM update mode is %s\n", vm->use_cpu_for_update ? "CPU" : "SDMA"); - WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), + WARN_ONCE((vm->use_cpu_for_update && + !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); if (vm->use_cpu_for_update) @@ -2711,12 +2731,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, vm->update_funcs = &amdgpu_vm_sdma_funcs; vm->last_update = NULL; - amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp); + amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, false, &bp); if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW; r = amdgpu_bo_create(adev, &bp, &root); if (r) - goto error_free_sched_entity; + goto error_free_delayed; r = amdgpu_bo_reserve(root, true); if (r) @@ -2728,7 +2748,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, amdgpu_vm_bo_base_init(&vm->root.base, vm, root); - r = amdgpu_vm_clear_bo(adev, vm, root); + r = amdgpu_vm_clear_bo(adev, vm, root, false); if (r) goto error_unreserve; @@ -2759,8 +2779,11 @@ error_free_root: amdgpu_bo_unref(&vm->root.base.bo); vm->root.base.bo = NULL; -error_free_sched_entity: - drm_sched_entity_destroy(&vm->entity); +error_free_delayed: + drm_sched_entity_destroy(&vm->delayed); + +error_free_direct: + drm_sched_entity_destroy(&vm->direct); return r; } @@ -2801,6 +2824,7 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, * * @adev: amdgpu_device pointer * @vm: requested vm + * @pasid: pasid to use * * This only works on GFX VMs that don't have any BOs added and no * page tables allocated yet. @@ -2816,7 +2840,8 @@ static int amdgpu_vm_check_clean_reserved(struct amdgpu_device *adev, * Returns: * 0 for success, -errno for errors. 
*/ -int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid) +int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, + unsigned int pasid) { bool pte_support_ats = (adev->asic_type == CHIP_RAVEN); int r; @@ -2848,7 +2873,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns */ if (pte_support_ats != vm->pte_support_ats) { vm->pte_support_ats = pte_support_ats; - r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo); + r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo, false); if (r) goto free_idr; } @@ -2858,7 +2883,8 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, uns AMDGPU_VM_USE_CPU_FOR_COMPUTE); DRM_DEBUG_DRIVER("VM update mode is %s\n", vm->use_cpu_for_update ? "CPU" : "SDMA"); - WARN_ONCE((vm->use_cpu_for_update && !amdgpu_gmc_vram_full_visible(&adev->gmc)), + WARN_ONCE((vm->use_cpu_for_update && + !amdgpu_gmc_vram_full_visible(&adev->gmc)), "CPU update of VM recommended only for large BAR system\n"); if (vm->use_cpu_for_update) @@ -2937,19 +2963,38 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) struct amdgpu_bo_va_mapping *mapping, *tmp; bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt; struct amdgpu_bo *root; - int i, r; + int i; amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm); + root = amdgpu_bo_ref(vm->root.base.bo); + amdgpu_bo_reserve(root, true); if (vm->pasid) { unsigned long flags; spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags); idr_remove(&adev->vm_manager.pasid_idr, vm->pasid); spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags); + vm->pasid = 0; + } + + list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { + if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { + amdgpu_vm_prt_fini(adev, vm); + prt_fini_needed = false; + } + + list_del(&mapping->list); + amdgpu_vm_free_mapping(adev, vm, mapping, NULL); } - drm_sched_entity_destroy(&vm->entity); + amdgpu_vm_free_pts(adev, vm, NULL); + amdgpu_bo_unreserve(root); + amdgpu_bo_unref(&root); + WARN_ON(vm->root.base.bo); + + drm_sched_entity_destroy(&vm->direct); + drm_sched_entity_destroy(&vm->delayed); if (!RB_EMPTY_ROOT(&vm->va.rb_root)) { dev_err(adev->dev, "still active bo inside vm\n"); @@ -2962,26 +3007,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm) list_del(&mapping->list); kfree(mapping); } - list_for_each_entry_safe(mapping, tmp, &vm->freed, list) { - if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) { - amdgpu_vm_prt_fini(adev, vm); - prt_fini_needed = false; - } - - list_del(&mapping->list); - amdgpu_vm_free_mapping(adev, vm, mapping, NULL); - } - root = amdgpu_bo_ref(vm->root.base.bo); - r = amdgpu_bo_reserve(root, true); - if (r) { - dev_err(adev->dev, "Leaking page tables because BO reservation failed\n"); - } else { - amdgpu_vm_free_pts(adev, vm, NULL); - amdgpu_bo_unreserve(root); - } - amdgpu_bo_unref(&root); - WARN_ON(vm->root.base.bo); dma_fence_put(vm->last_update); for (i = 0; i < AMDGPU_MAX_VMHUBS; i++) amdgpu_vmid_free_reserved(adev, vm, i); @@ -3065,8 +3091,9 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) switch (args->in.op) { case AMDGPU_VM_OP_RESERVE_VMID: - /* current, we only have requirement to reserve vmid from gfxhub */ - r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0); + /* We only have requirement to reserve vmid from gfxhub */ + r = amdgpu_vmid_alloc_reserved(adev, &fpriv->vm, + AMDGPU_GFXHUB_0); if (r) return r; break; @@ -3109,13 
+3136,88 @@ void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, */ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm) { - if (!vm->task_info.pid) { - vm->task_info.pid = current->pid; - get_task_comm(vm->task_info.task_name, current); + if (vm->task_info.pid) + return; - if (current->group_leader->mm == current->mm) { - vm->task_info.tgid = current->group_leader->pid; - get_task_comm(vm->task_info.process_name, current->group_leader); - } + vm->task_info.pid = current->pid; + get_task_comm(vm->task_info.task_name, current); + + if (current->group_leader->mm != current->mm) + return; + + vm->task_info.tgid = current->group_leader->pid; + get_task_comm(vm->task_info.process_name, current->group_leader); +} + +/** + * amdgpu_vm_handle_fault - graceful handling of VM faults. + * @adev: amdgpu device pointer + * @pasid: PASID of the VM + * @addr: Address of the fault + * + * Try to gracefully handle a VM fault. Return true if the fault was handled and + * shouldn't be reported any more. + */ +bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, + uint64_t addr) +{ + struct amdgpu_bo *root; + uint64_t value, flags; + struct amdgpu_vm *vm; + long r; + + spin_lock(&adev->vm_manager.pasid_lock); + vm = idr_find(&adev->vm_manager.pasid_idr, pasid); + if (vm) + root = amdgpu_bo_ref(vm->root.base.bo); + else + root = NULL; + spin_unlock(&adev->vm_manager.pasid_lock); + + if (!root) + return false; + + r = amdgpu_bo_reserve(root, true); + if (r) + goto error_unref; + + /* Double check that the VM still exists */ + spin_lock(&adev->vm_manager.pasid_lock); + vm = idr_find(&adev->vm_manager.pasid_idr, pasid); + if (vm && vm->root.base.bo != root) + vm = NULL; + spin_unlock(&adev->vm_manager.pasid_lock); + if (!vm) + goto error_unlock; + + addr /= AMDGPU_GPU_PAGE_SIZE; + flags = AMDGPU_PTE_VALID | AMDGPU_PTE_SNOOPED | + AMDGPU_PTE_SYSTEM; + + if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) { + /* Redirect the access to the dummy page */ + value = adev->dummy_page_addr; + flags |= AMDGPU_PTE_EXECUTABLE | AMDGPU_PTE_READABLE | + AMDGPU_PTE_WRITEABLE; + } else { + /* Let the hw retry silently on the PTE */ + value = 0; } + + r = amdgpu_vm_bo_update_mapping(adev, vm, true, NULL, addr, addr + 1, + flags, value, NULL, NULL); + if (r) + goto error_unlock; + + r = amdgpu_vm_update_pdes(adev, vm, true); + +error_unlock: + amdgpu_bo_unreserve(root); + if (r < 0) + DRM_ERROR("Can't handle page fault (%ld)\n", r); + +error_unref: + amdgpu_bo_unref(&root); + + return false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 2eda3a8c330d..4dbbe1b6b413 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -99,6 +99,9 @@ struct amdgpu_bo_list_entry; #define AMDGPU_VM_FAULT_STOP_FIRST 1 #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 +/* Reserve 4MB VRAM for page tables */ +#define AMDGPU_VM_RESERVED_VRAM (4ULL << 20) + /* max number of VMHUB */ #define AMDGPU_MAX_VMHUBS 3 #define AMDGPU_GFXHUB_0 0 @@ -199,6 +202,11 @@ struct amdgpu_vm_update_params { struct amdgpu_vm *vm; /** + * @direct: if changes should be made directly + */ + bool direct; + + /** * @pages_addr: * * DMA addresses to use for mapping @@ -254,8 +262,9 @@ struct amdgpu_vm { struct amdgpu_vm_pt root; struct dma_fence *last_update; - /* Scheduler entity for page table updates */ - struct drm_sched_entity entity; + /* Scheduler entities for page table updates */ + struct drm_sched_entity direct; + struct 
drm_sched_entity delayed; unsigned int pasid; /* dedicated to vm */ @@ -357,8 +366,8 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, int (*callback)(void *p, struct amdgpu_bo *bo), void *param); int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); -int amdgpu_vm_update_directories(struct amdgpu_device *adev, - struct amdgpu_vm *vm); +int amdgpu_vm_update_pdes(struct amdgpu_device *adev, + struct amdgpu_vm *vm, bool direct); int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_vm *vm, struct dma_fence **fence); @@ -404,6 +413,8 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid, struct amdgpu_task_info *task_info); +bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, unsigned int pasid, + uint64_t addr); void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index 5222d165abfc..73fec7a0ced5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -49,13 +49,6 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner, { int r; - /* Wait for PT BOs to be idle. PTs share the same resv. object - * as the root PD BO - */ - r = amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true); - if (unlikely(r)) - return r; - /* Wait for any BO move to be completed */ if (exclusive) { r = dma_fence_wait(exclusive, true); @@ -63,7 +56,14 @@ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner, return r; } - return 0; + /* Don't wait for submissions during page fault */ + if (p->direct) + return 0; + + /* Wait for PT BOs to be idle. PTs share the same resv. object + * as the root PD BO + */ + return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true); } /** @@ -89,7 +89,7 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p, pe += (unsigned long)amdgpu_bo_kptr(bo); - trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct); for (i = 0; i < count; i++) { value = p->pages_addr ? diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 61fc584cbb1a..832db59f441e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -68,17 +68,19 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, if (r) return r; + p->num_dw_left = ndw; + + /* Wait for moves to be completed */ r = amdgpu_sync_fence(p->adev, &p->job->sync, exclusive, false); if (r) return r; - r = amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv, - owner, false); - if (r) - return r; + /* Don't wait for any submissions during page fault handling */ + if (p->direct) + return 0; - p->num_dw_left = ndw; - return 0; + return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv, + owner, false); } /** @@ -95,22 +97,23 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, { struct amdgpu_bo *root = p->vm->root.base.bo; struct amdgpu_ib *ib = p->job->ibs; + struct drm_sched_entity *entity; struct amdgpu_ring *ring; struct dma_fence *f; int r; - ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched); + entity = p->direct ? 
&p->vm->direct : &p->vm->delayed; + ring = container_of(entity->rq->sched, struct amdgpu_ring, sched); WARN_ON(ib->length_dw == 0); amdgpu_ring_pad_ib(ring, ib); WARN_ON(ib->length_dw > p->num_dw_left); - r = amdgpu_job_submit(p->job, &p->vm->entity, - AMDGPU_FENCE_OWNER_VM, &f); + r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f); if (r) goto error; amdgpu_bo_fence(root, f, true); - if (fence) + if (fence && !p->direct) swap(*fence, f); dma_fence_put(f); return 0; @@ -120,7 +123,6 @@ error: return r; } - /** * amdgpu_vm_sdma_copy_ptes - copy the PTEs from mapping * @@ -141,7 +143,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p, src += p->num_dw_left * 4; pe += amdgpu_bo_gpu_offset(bo); - trace_amdgpu_vm_copy_ptes(pe, src, count); + trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct); amdgpu_vm_copy_pte(p->adev, ib, pe, src, count); } @@ -168,7 +170,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p, struct amdgpu_ib *ib = p->job->ibs; pe += amdgpu_bo_gpu_offset(bo); - trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags); + trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct); if (count < 3) { amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags, count, incr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 3a9d8c15fe9f..82a3299e53c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -23,6 +23,9 @@ */ #include "amdgpu.h" +#include "amdgpu_vm.h" +#include "amdgpu_atomfirmware.h" +#include "atom.h" struct amdgpu_vram_mgr { struct drm_mm mm; @@ -101,6 +104,39 @@ static ssize_t amdgpu_mem_info_vis_vram_used_show(struct device *dev, amdgpu_vram_mgr_vis_usage(&adev->mman.bdev.man[TTM_PL_VRAM])); } +static ssize_t amdgpu_mem_info_vram_vendor(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + + switch (adev->gmc.vram_vendor) { + case SAMSUNG: + return snprintf(buf, PAGE_SIZE, "samsung\n"); + case INFINEON: + return snprintf(buf, PAGE_SIZE, "infineon\n"); + case ELPIDA: + return snprintf(buf, PAGE_SIZE, "elpida\n"); + case ETRON: + return snprintf(buf, PAGE_SIZE, "etron\n"); + case NANYA: + return snprintf(buf, PAGE_SIZE, "nanya\n"); + case HYNIX: + return snprintf(buf, PAGE_SIZE, "hynix\n"); + case MOSEL: + return snprintf(buf, PAGE_SIZE, "mosel\n"); + case WINBOND: + return snprintf(buf, PAGE_SIZE, "winbond\n"); + case ESMT: + return snprintf(buf, PAGE_SIZE, "esmt\n"); + case MICRON: + return snprintf(buf, PAGE_SIZE, "micron\n"); + default: + return snprintf(buf, PAGE_SIZE, "unknown\n"); + } +} + static DEVICE_ATTR(mem_info_vram_total, S_IRUGO, amdgpu_mem_info_vram_total_show, NULL); static DEVICE_ATTR(mem_info_vis_vram_total, S_IRUGO, @@ -109,6 +145,8 @@ static DEVICE_ATTR(mem_info_vram_used, S_IRUGO, amdgpu_mem_info_vram_used_show, NULL); static DEVICE_ATTR(mem_info_vis_vram_used, S_IRUGO, amdgpu_mem_info_vis_vram_used_show, NULL); +static DEVICE_ATTR(mem_info_vram_vendor, S_IRUGO, + amdgpu_mem_info_vram_vendor, NULL); /** * amdgpu_vram_mgr_init - init VRAM manager and DRM MM @@ -154,6 +192,11 @@ static int amdgpu_vram_mgr_init(struct ttm_mem_type_manager *man, DRM_ERROR("Failed to create device file mem_info_vis_vram_used\n"); return ret; } + ret = device_create_file(adev->dev, &dev_attr_mem_info_vram_vendor); + if (ret) { + DRM_ERROR("Failed to create device file 
mem_info_vram_vendor\n"); + return ret; + } return 0; } @@ -180,6 +223,7 @@ static int amdgpu_vram_mgr_fini(struct ttm_mem_type_manager *man) device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_total); device_remove_file(adev->dev, &dev_attr_mem_info_vram_used); device_remove_file(adev->dev, &dev_attr_mem_info_vis_vram_used); + device_remove_file(adev->dev, &dev_attr_mem_info_vram_vendor); return 0; } @@ -275,7 +319,7 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, struct drm_mm_node *nodes; enum drm_mm_insert_mode mode; unsigned long lpfn, num_nodes, pages_per_node, pages_left; - uint64_t vis_usage = 0, mem_bytes; + uint64_t vis_usage = 0, mem_bytes, max_bytes; unsigned i; int r; @@ -283,9 +327,13 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man, if (!lpfn) lpfn = man->size; + max_bytes = adev->gmc.mc_vram_size; + if (tbo->type != ttm_bo_type_kernel) + max_bytes -= AMDGPU_VM_RESERVED_VRAM; + /* bail out quickly if there's likely not enough VRAM for this BO */ mem_bytes = (u64)mem->num_pages << PAGE_SHIFT; - if (atomic64_add_return(mem_bytes, &mgr->usage) > adev->gmc.mc_vram_size) { + if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) { atomic64_sub(mem_bytes, &mgr->usage); mem->mm_node = NULL; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 65aae75f80fd..61d13d8b7b20 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -25,6 +25,7 @@ #include "amdgpu.h" #include "amdgpu_xgmi.h" #include "amdgpu_smu.h" +#include "amdgpu_ras.h" #include "df/df_3_6_offset.h" static DEFINE_MUTEX(xgmi_mutex); @@ -273,22 +274,55 @@ int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate) { int ret = 0; struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev, 0); + struct amdgpu_device *tmp_adev; + bool update_hive_pstate = true; + bool is_high_pstate = pstate && adev->asic_type == CHIP_VEGA20; if (!hive) return 0; - if (hive->pstate == pstate) - return 0; + mutex_lock(&hive->hive_lock); + + if (hive->pstate == pstate) { + adev->pstate = is_high_pstate ? 
pstate : adev->pstate; + goto out; + } dev_dbg(adev->dev, "Set xgmi pstate %d.\n", pstate); if (is_support_sw_smu_xgmi(adev)) ret = smu_set_xgmi_pstate(&adev->smu, pstate); - if (ret) + else if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->set_xgmi_pstate) + ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, + pstate); + + if (ret) { dev_err(adev->dev, "XGMI: Set pstate failure on device %llx, hive %llx, ret %d", adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id, ret); + goto out; + } + + /* Update device pstate */ + adev->pstate = pstate; + + /* + * Update the hive pstate only all devices of the hive + * are in the same pstate + */ + list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { + if (tmp_adev->pstate != adev->pstate) { + update_hive_pstate = false; + break; + } + } + if (update_hive_pstate || is_high_pstate) + hive->pstate = pstate; + +out: + mutex_unlock(&hive->hive_lock); return ret; } @@ -363,6 +397,9 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev) goto exit; } + /* Set default device pstate */ + adev->pstate = -1; + top_info = &adev->psp.xgmi_context.top_info; list_add_tail(&adev->gmc.xgmi.head, &hive->device_list); @@ -437,3 +474,52 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev) mutex_unlock(&hive->hive_lock); } } + +int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) +{ + int r; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + struct ras_fs_if fs_info = { + .sysfs_name = "xgmi_wafl_err_count", + .debugfs_name = "xgmi_wafl_err_inject", + }; + + if (!adev->gmc.xgmi.supported || + adev->gmc.xgmi.num_physical_nodes == 0) + return 0; + + if (!adev->gmc.xgmi.ras_if) { + adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); + if (!adev->gmc.xgmi.ras_if) + return -ENOMEM; + adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL; + adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; + adev->gmc.xgmi.ras_if->sub_block_index = 0; + strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl"); + } + ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if; + r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if, + &fs_info, &ih_info); + if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) { + kfree(adev->gmc.xgmi.ras_if); + adev->gmc.xgmi.ras_if = NULL; + } + + return r; +} + +void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev) +{ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) && + adev->gmc.xgmi.ras_if) { + struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if; + struct ras_ih_if ih_info = { + .cb = NULL, + }; + + amdgpu_ras_late_fini(adev, ras_if, &ih_info); + kfree(ras_if); + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index fbcee31788c4..bbf504ff7051 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -42,6 +42,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev); int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate); int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); +int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev); +void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev); static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev, struct amdgpu_device *bo_adev) diff --git a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c index 4853899b1824..fda99c958c3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c +++ 
b/drivers/gpu/drm/amd/amdgpu/arct_reg_init.c @@ -24,7 +24,6 @@ #include "soc15.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "arct_ip_offset.h" int arct_reg_base_init(struct amdgpu_device *adev) @@ -52,6 +51,8 @@ int arct_reg_base_init(struct amdgpu_device *adev) adev->reg_offset[SDMA7_HWIP][i] = (uint32_t *)(&(SDMA7_BASE.instance[i])); adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i])); adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i])); + adev->reg_offset[UMC_HWIP][i] = (uint32_t *)(&(UMC_BASE.instance[i])); + adev->reg_offset[RSMU_HWIP][i] = (uint32_t *)(&(RSMU_BASE.instance[i])); } return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index b81bb414fcb3..2d64d270725d 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -966,6 +966,25 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev, static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { {mmGRBM_STATUS}, + {mmGRBM_STATUS2}, + {mmGRBM_STATUS_SE0}, + {mmGRBM_STATUS_SE1}, + {mmGRBM_STATUS_SE2}, + {mmGRBM_STATUS_SE3}, + {mmSRBM_STATUS}, + {mmSRBM_STATUS2}, + {mmSDMA0_STATUS_REG + SDMA0_REGISTER_OFFSET}, + {mmSDMA0_STATUS_REG + SDMA1_REGISTER_OFFSET}, + {mmCP_STAT}, + {mmCP_STALLED_STAT1}, + {mmCP_STALLED_STAT2}, + {mmCP_STALLED_STAT3}, + {mmCP_CPF_BUSY_STAT}, + {mmCP_CPF_STALLED_STAT1}, + {mmCP_CPF_STATUS}, + {mmCP_CPC_BUSY_STAT}, + {mmCP_CPC_STALLED_STAT1}, + {mmCP_CPC_STATUS}, {mmGB_ADDR_CONFIG}, {mmMC_ARB_RAMCFG}, {mmGB_TILE_MODE0}, @@ -1270,15 +1289,15 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) } /** - * cik_asic_reset - soft reset GPU + * cik_asic_pci_config_reset - soft reset GPU * * @adev: amdgpu_device pointer * - * Look up which blocks are hung and attempt - * to reset them. + * Use PCI Config method to reset the GPU. + * * Returns 0 for success. */ -static int cik_asic_reset(struct amdgpu_device *adev) +static int cik_asic_pci_config_reset(struct amdgpu_device *adev) { int r; @@ -1294,7 +1313,45 @@ static int cik_asic_reset(struct amdgpu_device *adev) static enum amd_reset_method cik_asic_reset_method(struct amdgpu_device *adev) { - return AMD_RESET_METHOD_LEGACY; + bool baco_reset; + + switch (adev->asic_type) { + case CHIP_BONAIRE: + case CHIP_HAWAII: + /* disable baco reset until it works */ + /* smu7_asic_get_baco_capability(adev, &baco_reset); */ + baco_reset = false; + break; + default: + baco_reset = false; + break; + } + + if (baco_reset) + return AMD_RESET_METHOD_BACO; + else + return AMD_RESET_METHOD_LEGACY; +} + +/** + * cik_asic_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * + * Look up which blocks are hung and attempt + * to reset them. + * Returns 0 for success. 
+ */ +static int cik_asic_reset(struct amdgpu_device *adev) +{ + int r; + + if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) + r = smu7_asic_baco_reset(adev); + else + r = cik_asic_pci_config_reset(adev); + + return r; } static u32 cik_get_config_memsize(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/cik.h b/drivers/gpu/drm/amd/amdgpu/cik.h index 54c625a2e570..9870bf27870e 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.h +++ b/drivers/gpu/drm/amd/amdgpu/cik.h @@ -31,4 +31,7 @@ void cik_srbm_select(struct amdgpu_device *adev, int cik_set_ip_blocks(struct amdgpu_device *adev); void legacy_doorbell_index_init(struct amdgpu_device *adev); +int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap); +int smu7_asic_baco_reset(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 645550e7caf5..40d2ac723dd6 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -330,9 +330,11 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -368,6 +370,7 @@ static void dce_v10_0_hpd_init(struct amdgpu_device *adev) amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } /** @@ -382,9 +385,11 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -397,6 +402,7 @@ static void dce_v10_0_hpd_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } static u32 dce_v10_0_hpd_get_gpio_reg(struct amdgpu_device *adev) @@ -1219,10 +1225,12 @@ static void dce_v10_0_afmt_audio_select_pin(struct drm_encoder *encoder) static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp; int interlace = 0; @@ -1230,12 +1238,14 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + 
drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1261,10 +1271,12 @@ static void dce_v10_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp; u8 *sadb = NULL; @@ -1273,12 +1285,14 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1313,10 +1327,12 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; struct cea_sad *sads; int i, sad_count; @@ -1339,12 +1355,14 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1352,10 +1370,10 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } BUG_ON(!sads); for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index d9f470632b2c..898ef72d423c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -348,9 +348,11 @@ static void dce_v11_0_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -385,6 +387,7 @@ static void 
dce_v11_0_hpd_init(struct amdgpu_device *adev) dce_v11_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } /** @@ -399,9 +402,11 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -413,6 +418,7 @@ static void dce_v11_0_hpd_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } static u32 dce_v11_0_hpd_get_gpio_reg(struct amdgpu_device *adev) @@ -1245,10 +1251,12 @@ static void dce_v11_0_afmt_audio_select_pin(struct drm_encoder *encoder) static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp; int interlace = 0; @@ -1256,12 +1264,14 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1287,10 +1297,12 @@ static void dce_v11_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp; u8 *sadb = NULL; @@ -1299,12 +1311,14 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1339,10 +1353,12 @@ static void dce_v11_0_audio_write_speaker_allocation(struct drm_encoder *encoder static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; 
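The change repeated across these dce_v10/dce_v11 functions (and, further down, dce_v6/dce_v8) swaps the open-coded walk of dev->mode_config.connector_list for the drm_connector_list_iter helpers, which hold the connector list lock for the duration of the walk and must therefore always be paired with drm_connector_list_iter_end(). A condensed sketch of the new lookup pattern follows; the wrapper function name is only illustrative, the iterator calls are the ones added in the hunks above.

#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>

/* Find the connector currently driven by @encoder using the locked
 * iterator instead of touching connector_list directly. */
static struct drm_connector *
find_connector_for_encoder(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_connector *connector, *found = NULL;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->encoder == encoder) {
			found = connector;
			break;
		}
	}
	/* always end the iteration, even after an early break */
	drm_connector_list_iter_end(&iter);

	return found;
}

This is why each converted function gains both a struct drm_connector_list_iter on the stack and a matching drm_connector_list_iter_end() after the loop.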
+ struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; struct cea_sad *sads; int i, sad_count; @@ -1365,12 +1381,14 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) if (!dig || !dig->afmt || !dig->afmt->pin) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1378,10 +1396,10 @@ static void dce_v11_0_audio_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } BUG_ON(!sads); for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 3eb2e7429269..db15a112becc 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -281,9 +281,11 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -309,7 +311,7 @@ static void dce_v6_0_hpd_init(struct amdgpu_device *adev) dce_v6_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } - + drm_connector_list_iter_end(&iter); } /** @@ -324,9 +326,11 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -338,6 +342,7 @@ static void dce_v6_0_hpd_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } static u32 dce_v6_0_hpd_get_gpio_reg(struct amdgpu_device *adev) @@ -1124,20 +1129,24 @@ static void dce_v6_0_audio_select_pin(struct drm_encoder *encoder) static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct 
drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; int interlace = 0; u32 tmp; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1164,21 +1173,25 @@ static void dce_v6_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u8 *sadb = NULL; int sad_count; u32 tmp; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1221,10 +1234,12 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder) static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; struct cea_sad *sads; int i, sad_count; @@ -1244,12 +1259,14 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) { ixAZALIA_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, }; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1257,10 +1274,10 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { u32 tmp = 0; @@ -1632,6 +1649,7 @@ static void dce_v6_0_afmt_setmode(struct drm_encoder *encoder, struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; int em = amdgpu_atombios_encoder_get_encoder_mode(encoder); int bpc = 8; @@ -1639,12 +1657,14 @@ static void dce_v6_0_afmt_setmode(struct 
drm_encoder *encoder, if (!dig || !dig->afmt) return; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index a16c5e9e610e..f06c9022c1fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -275,9 +275,11 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -303,6 +305,7 @@ static void dce_v8_0_hpd_init(struct amdgpu_device *adev) dce_v8_0_hpd_set_polarity(adev, amdgpu_connector->hpd.hpd); amdgpu_irq_get(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } /** @@ -317,9 +320,11 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; u32 tmp; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector); if (amdgpu_connector->hpd.hpd >= adev->mode_info.num_hpd) @@ -331,6 +336,7 @@ static void dce_v8_0_hpd_fini(struct amdgpu_device *adev) amdgpu_irq_put(adev, &adev->hpd_irq, amdgpu_connector->hpd.hpd); } + drm_connector_list_iter_end(&iter); } static u32 dce_v8_0_hpd_get_gpio_reg(struct amdgpu_device *adev) @@ -1157,10 +1163,12 @@ static void dce_v8_0_afmt_audio_select_pin(struct drm_encoder *encoder) static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder, struct drm_display_mode *mode) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 tmp = 0, offset; @@ -1169,12 +1177,14 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder, offset = dig->afmt->pin->offset; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1214,10 +1224,12 @@ static void dce_v8_0_audio_write_latency_fields(struct drm_encoder *encoder, static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder) { - struct amdgpu_device *adev = 
encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; u32 offset, tmp; u8 *sadb = NULL; @@ -1228,12 +1240,14 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder) offset = dig->afmt->pin->offset; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1263,11 +1277,13 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder) static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) { - struct amdgpu_device *adev = encoder->dev->dev_private; + struct drm_device *dev = encoder->dev; + struct amdgpu_device *adev = dev->dev_private; struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv; u32 offset; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct amdgpu_connector *amdgpu_connector = NULL; struct cea_sad *sads; int i, sad_count; @@ -1292,12 +1308,14 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) offset = dig->afmt->pin->offset; - list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { if (connector->encoder == encoder) { amdgpu_connector = to_amdgpu_connector(connector); break; } } + drm_connector_list_iter_end(&iter); if (!amdgpu_connector) { DRM_ERROR("Couldn't find encoder's connector\n"); @@ -1305,10 +1323,10 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(amdgpu_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } BUG_ON(!sads); for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index c9608ae8643b..e4f94863332c 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -260,15 +260,14 @@ static struct drm_encoder * dce_virtual_encoder(struct drm_connector *connector) { struct drm_encoder *encoder; - int i; - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { if (encoder->encoder_type == DRM_MODE_ENCODER_VIRTUAL) return encoder; } /* pick the first one */ - drm_connector_for_each_possible_encoder(connector, encoder, i) + drm_connector_for_each_possible_encoder(connector, encoder) return encoder; return NULL; diff --git a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c index 844c03868248..d6221298b477 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v1_7.c @@ -33,6 +33,10 @@ static void df_v1_7_sw_init(struct amdgpu_device *adev) { } +static void df_v1_7_sw_fini(struct amdgpu_device 
*adev) +{ +} + static void df_v1_7_enable_broadcast_mode(struct amdgpu_device *adev, bool enable) { @@ -111,6 +115,7 @@ static void df_v1_7_enable_ecc_force_par_wr_rmw(struct amdgpu_device *adev, const struct amdgpu_df_funcs df_v1_7_funcs = { .sw_init = df_v1_7_sw_init, + .sw_fini = df_v1_7_sw_fini, .enable_broadcast_mode = df_v1_7_enable_broadcast_mode, .get_fb_channel_number = df_v1_7_get_fb_channel_number, .get_hbm_channel_number = df_v1_7_get_hbm_channel_number, diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 5850c8e34caa..16fbd2bc8ad1 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -99,8 +99,8 @@ static uint64_t df_v3_6_get_fica(struct amdgpu_device *adev, unsigned long flags, address, data; uint32_t ficadl_val, ficadh_val; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3); @@ -122,8 +122,8 @@ static void df_v3_6_set_fica(struct amdgpu_device *adev, uint32_t ficaa_val, { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, smnDF_PIE_AON_FabricIndirectConfigAccessAddress3); @@ -150,8 +150,8 @@ static void df_v3_6_perfmon_rreg(struct amdgpu_device *adev, { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, lo_addr); @@ -172,8 +172,8 @@ static void df_v3_6_perfmon_wreg(struct amdgpu_device *adev, uint32_t lo_addr, { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, lo_addr); @@ -220,6 +220,13 @@ static void df_v3_6_sw_init(struct amdgpu_device *adev) adev->df_perfmon_config_assign_mask[i] = 0; } +static void df_v3_6_sw_fini(struct amdgpu_device *adev) +{ + + device_remove_file(adev->dev, &dev_attr_df_cntr_avail); + +} + static void df_v3_6_enable_broadcast_mode(struct amdgpu_device *adev, bool enable) { @@ -537,6 +544,7 @@ static void df_v3_6_pmc_get_count(struct amdgpu_device *adev, const struct amdgpu_df_funcs df_v3_6_funcs = { .sw_init = df_v3_6_sw_init, + .sw_fini = df_v3_6_sw_fini, .enable_broadcast_mode = df_v3_6_enable_broadcast_mode, .get_fb_channel_number = df_v3_6_get_fb_channel_number, .get_hbm_channel_number = df_v3_6_get_hbm_channel_number, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 53090eae0082..ca5f0e7ea1ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -127,7 +127,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, 
mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010), SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CGTT_CLK_CTRL, 0xfeff0fff, 0x40000100), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000) }; static const struct soc15_reg_golden golden_settings_gc_10_0_nv10[] = @@ -171,7 +171,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_LDS_CLK_CTRL, 0xffffffff, 0xffffffff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTA_CNTL_AUX, 0xfff7ffff, 0x01030000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CNTL, 0x60000010, 0x479c0010), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00800000, 0x00800000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0x00c00000, 0x00c00000), }; static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = @@ -1469,7 +1469,7 @@ static int gfx_v10_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); amdgpu_gfx_kiq_fini(adev); gfx_v10_0_pfp_fini(adev); @@ -1785,27 +1785,52 @@ static void gfx_v10_0_enable_gui_idle_interrupt(struct amdgpu_device *adev, WREG32_SOC15(GC, 0, mmCP_INT_CNTL_RING0, tmp); } -static void gfx_v10_0_init_csb(struct amdgpu_device *adev) +static int gfx_v10_0_init_csb(struct amdgpu_device *adev) { + int r; + + if (adev->in_gpu_reset) { + r = amdgpu_bo_reserve(adev->gfx.rlc.clear_state_obj, false); + if (r) + return r; + + r = amdgpu_bo_kmap(adev->gfx.rlc.clear_state_obj, + (void **)&adev->gfx.rlc.cs_ptr); + if (!r) { + adev->gfx.rlc.funcs->get_csb_buffer(adev, + adev->gfx.rlc.cs_ptr); + amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj); + } + + amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); + if (r) + return r; + } + /* csib */ WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI, adev->gfx.rlc.clear_state_gpu_addr >> 32); WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO, adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc); WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size); + + return 0; } -static void gfx_v10_0_init_pg(struct amdgpu_device *adev) +static int gfx_v10_0_init_pg(struct amdgpu_device *adev) { int i; + int r; - gfx_v10_0_init_csb(adev); + r = gfx_v10_0_init_csb(adev); + if (r) + return r; for (i = 0; i < adev->num_vmhubs; i++) amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); /* TODO: init power gating */ - return; + return 0; } void gfx_v10_0_rlc_stop(struct amdgpu_device *adev) @@ -1907,7 +1932,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) r = gfx_v10_0_wait_for_rlc_autoload_complete(adev); if (r) return r; - gfx_v10_0_init_pg(adev); + + r = gfx_v10_0_init_pg(adev); + if (r) + return r; /* enable RLC SRM */ gfx_v10_0_rlc_enable_srm(adev); @@ -1933,7 +1961,10 @@ static int gfx_v10_0_rlc_resume(struct amdgpu_device *adev) return r; } - gfx_v10_0_init_pg(adev); + r = gfx_v10_0_init_pg(adev); + if (r) + return r; + adev->gfx.rlc.funcs->start(adev); if (adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO) { @@ -2400,7 +2431,7 @@ static int gfx_v10_0_wait_for_rlc_autoload_complete(struct amdgpu_device *adev) return 0; } -static void gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) +static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable) { int i; u32 tmp = RREG32_SOC15(GC, 0, mmCP_ME_CNTL); @@ -2413,7 +2444,17 @@ static void gfx_v10_0_cp_gfx_enable(struct 
amdgpu_device *adev, bool enable) adev->gfx.gfx_ring[i].sched.ready = false; } WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp); - udelay(50); + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + DRM_ERROR("failed to %s cp gfx\n", enable ? "unhalt" : "halt"); + + return 0; } static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) @@ -2470,7 +2511,7 @@ static int gfx_v10_0_cp_gfx_load_pfp_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_PFP_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_PFP_IC_BASE_CNTL, VMID, 0); @@ -2540,7 +2581,7 @@ static int gfx_v10_0_cp_gfx_load_ce_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CE_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CE_IC_BASE_CNTL, VMID, 0); @@ -2609,7 +2650,7 @@ static int gfx_v10_0_cp_gfx_load_me_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_ME_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_ME_IC_BASE_CNTL, VMID, 0); @@ -2930,7 +2971,7 @@ static int gfx_v10_0_cp_compute_load_microcode(struct amdgpu_device *adev) } if (amdgpu_emu_mode == 1) - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); tmp = RREG32_SOC15(GC, 0, mmCP_CPC_IC_BASE_CNTL); tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0); @@ -3114,6 +3155,7 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; struct v10_gfx_mqd *mqd = ring->mqd_ptr; + int mqd_idx = ring - &adev->gfx.gfx_ring[0]; if (!adev->in_gpu_reset && !adev->in_suspend) { memset((void *)mqd, 0, sizeof(*mqd)); @@ -3125,14 +3167,15 @@ static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring) #endif nv_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); - if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]) - memcpy(adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], mqd, sizeof(*mqd)); + if (adev->gfx.me.mqd_backup[mqd_idx]) + memcpy(adev->gfx.me.mqd_backup[mqd_idx], mqd, sizeof(*mqd)); } else if (adev->in_gpu_reset) { /* reset mqd with the backup copy */ - if (adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS]) - memcpy(mqd, adev->gfx.me.mqd_backup[AMDGPU_MAX_GFX_RINGS], sizeof(*mqd)); + if (adev->gfx.me.mqd_backup[mqd_idx]) + memcpy(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; + adev->wb.wb[ring->wptr_offs] = 0; amdgpu_ring_clear_ring(ring); #ifdef BRING_UP_DEBUG mutex_lock(&adev->srbm_mutex); @@ -4384,7 +4427,7 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 ref_and_mask, reg_mem_engine; - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { switch (ring->me) { @@ -4404,8 +4447,8 @@ static void gfx_v10_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) } gfx_v10_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, - adev->nbio_funcs->get_hdp_flush_req_offset(adev), - adev->nbio_funcs->get_hdp_flush_done_offset(adev), + 
adev->nbio.funcs->get_hdp_flush_req_offset(adev), + adev->nbio.funcs->get_hdp_flush_done_offset(adev), ref_and_mask, ref_and_mask, 0x20); } @@ -5331,15 +5374,12 @@ static void gfx_v10_0_set_rlc_funcs(struct amdgpu_device *adev) static void gfx_v10_0_set_gds_init(struct amdgpu_device *adev) { - /* init asic gds info */ - switch (adev->asic_type) { - case CHIP_NAVI10: - default: - adev->gds.gds_size = 0x10000; - adev->gds.gds_compute_max_wave_id = 0x4ff; - break; - } + unsigned total_cu = adev->gfx.config.max_cu_per_sh * + adev->gfx.config.max_sh_per_se * + adev->gfx.config.max_shader_engines; + adev->gds.gds_size = 0x10000; + adev->gds.gds_compute_max_wave_id = total_cu * 32 - 1; adev->gds.gws_size = 64; adev->gds.oa_size = 16; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 87dd55e9d72b..ffbde9136372 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -2103,7 +2103,7 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); amdgpu_gfx_kiq_fini(adev); gfx_v8_0_mec_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 97cf0b536873..faf2ffce5837 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -131,6 +131,18 @@ MODULE_FIRMWARE("amdgpu/renoir_rlc.bin"); #define mmTCP_CHAN_STEER_5_ARCT 0x0b0c #define mmTCP_CHAN_STEER_5_ARCT_BASE_IDX 0 +struct ras_gfx_subblock_reg { + const char *name; + uint32_t hwip; + uint32_t inst; + uint32_t seg; + uint32_t reg_offset; + uint32_t sec_count_mask; + uint32_t sec_count_shift; + uint32_t ded_count_mask; + uint32_t ded_count_shift; +}; + enum ta_ras_gfx_subblock { /*CPC*/ TA_RAS_BLOCK__GFX_CPC_INDEX_START = 0, @@ -517,9 +529,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_0[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0xb5d3f197), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000003ff), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000) }; static const struct soc15_reg_golden golden_settings_gc_9_0_vg10[] = @@ -582,9 +594,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_1[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_CACHE_INVALIDATION, 0x3fff3af3, 0x19200000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmVGT_GS_MAX_WAVE_ID, 0x00000fff, 0x000000ff), SOC15_REG_GOLDEN_VALUE(GC, 0, mmWD_UTCL1_CNTL, 0x08000000, 0x08000080), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 
0x00008000) }; static const struct soc15_reg_golden golden_settings_gc_9_1_rv1[] = @@ -676,9 +688,9 @@ static const struct soc15_reg_golden golden_settings_gc_9_2_1_vg12[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_HI, 0xffffffff, 0x00000000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_LO, 0xffffffff, 0x76325410), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTD_CNTL, 0x01bd9f33, 0x01000000), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000000, 0x00000800), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00000000, 0x00008000) + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC1_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_MEC2_F32_INT_DIS, 0x00000800, 0x00000800), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmCP_DEBUG, 0x00008000, 0x00008000) }; static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] = @@ -691,6 +703,8 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_3_ARCT, 0x3fffffff, 0x2ebd9fe3), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_4_ARCT, 0x3fffffff, 0xb90f5b1), SOC15_REG_GOLDEN_VALUE(GC, 0, mmTCP_CHAN_STEER_5_ARCT, 0x3ff, 0x135), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_CONFIG, 0xffffffff, 0x011A0000), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00), }; static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] = @@ -1342,7 +1356,8 @@ static int gfx_v9_0_init_cp_compute_microcode(struct amdgpu_device *adev, /* TODO: Determine if MEC2 JT FW loading can be removed for all GFX V9 asic and above */ - if (adev->asic_type != CHIP_ARCTURUS) { + if (adev->asic_type != CHIP_ARCTURUS && + adev->asic_type != CHIP_RENOIR) { info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC2_JT]; info->ucode_id = AMDGPU_UCODE_ID_CP_MEC2_JT; info->fw = adev->gfx.mec2_fw; @@ -1974,190 +1989,6 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) return 0; } -static int gfx_v9_0_ngg_create_buf(struct amdgpu_device *adev, - struct amdgpu_ngg_buf *ngg_buf, - int size_se, - int default_size_se) -{ - int r; - - if (size_se < 0) { - dev_err(adev->dev, "Buffer size is invalid: %d\n", size_se); - return -EINVAL; - } - size_se = size_se ? 
size_se : default_size_se; - - ngg_buf->size = size_se * adev->gfx.config.max_shader_engines; - r = amdgpu_bo_create_kernel(adev, ngg_buf->size, - PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, - &ngg_buf->bo, - &ngg_buf->gpu_addr, - NULL); - if (r) { - dev_err(adev->dev, "(%d) failed to create NGG buffer\n", r); - return r; - } - ngg_buf->bo_size = amdgpu_bo_size(ngg_buf->bo); - - return r; -} - -static int gfx_v9_0_ngg_fini(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < NGG_BUF_MAX; i++) - amdgpu_bo_free_kernel(&adev->gfx.ngg.buf[i].bo, - &adev->gfx.ngg.buf[i].gpu_addr, - NULL); - - memset(&adev->gfx.ngg.buf[0], 0, - sizeof(struct amdgpu_ngg_buf) * NGG_BUF_MAX); - - adev->gfx.ngg.init = false; - - return 0; -} - -static int gfx_v9_0_ngg_init(struct amdgpu_device *adev) -{ - int r; - - if (!amdgpu_ngg || adev->gfx.ngg.init == true) - return 0; - - /* GDS reserve memory: 64 bytes alignment */ - adev->gfx.ngg.gds_reserve_size = ALIGN(5 * 4, 0x40); - adev->gds.gds_size -= adev->gfx.ngg.gds_reserve_size; - adev->gfx.ngg.gds_reserve_addr = RREG32_SOC15(GC, 0, mmGDS_VMID0_BASE); - adev->gfx.ngg.gds_reserve_addr += RREG32_SOC15(GC, 0, mmGDS_VMID0_SIZE); - - /* Primitive Buffer */ - r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PRIM], - amdgpu_prim_buf_per_se, - 64 * 1024); - if (r) { - dev_err(adev->dev, "Failed to create Primitive Buffer\n"); - goto err; - } - - /* Position Buffer */ - r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_POS], - amdgpu_pos_buf_per_se, - 256 * 1024); - if (r) { - dev_err(adev->dev, "Failed to create Position Buffer\n"); - goto err; - } - - /* Control Sideband */ - r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_CNTL], - amdgpu_cntl_sb_buf_per_se, - 256); - if (r) { - dev_err(adev->dev, "Failed to create Control Sideband Buffer\n"); - goto err; - } - - /* Parameter Cache, not created by default */ - if (amdgpu_param_buf_per_se <= 0) - goto out; - - r = gfx_v9_0_ngg_create_buf(adev, &adev->gfx.ngg.buf[NGG_PARAM], - amdgpu_param_buf_per_se, - 512 * 1024); - if (r) { - dev_err(adev->dev, "Failed to create Parameter Cache\n"); - goto err; - } - -out: - adev->gfx.ngg.init = true; - return 0; -err: - gfx_v9_0_ngg_fini(adev); - return r; -} - -static int gfx_v9_0_ngg_en(struct amdgpu_device *adev) -{ - struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0]; - int r; - u32 data, base; - - if (!amdgpu_ngg) - return 0; - - /* Program buffer size */ - data = REG_SET_FIELD(0, WD_BUF_RESOURCE_1, INDEX_BUF_SIZE, - adev->gfx.ngg.buf[NGG_PRIM].size >> 8); - data = REG_SET_FIELD(data, WD_BUF_RESOURCE_1, POS_BUF_SIZE, - adev->gfx.ngg.buf[NGG_POS].size >> 8); - WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_1, data); - - data = REG_SET_FIELD(0, WD_BUF_RESOURCE_2, CNTL_SB_BUF_SIZE, - adev->gfx.ngg.buf[NGG_CNTL].size >> 8); - data = REG_SET_FIELD(data, WD_BUF_RESOURCE_2, PARAM_BUF_SIZE, - adev->gfx.ngg.buf[NGG_PARAM].size >> 10); - WREG32_SOC15(GC, 0, mmWD_BUF_RESOURCE_2, data); - - /* Program buffer base address */ - base = lower_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr); - data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE, BASE, base); - WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE, data); - - base = upper_32_bits(adev->gfx.ngg.buf[NGG_PRIM].gpu_addr); - data = REG_SET_FIELD(0, WD_INDEX_BUF_BASE_HI, BASE_HI, base); - WREG32_SOC15(GC, 0, mmWD_INDEX_BUF_BASE_HI, data); - - base = lower_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr); - data = REG_SET_FIELD(0, WD_POS_BUF_BASE, BASE, base); - WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE, data); - - base = 
upper_32_bits(adev->gfx.ngg.buf[NGG_POS].gpu_addr); - data = REG_SET_FIELD(0, WD_POS_BUF_BASE_HI, BASE_HI, base); - WREG32_SOC15(GC, 0, mmWD_POS_BUF_BASE_HI, data); - - base = lower_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr); - data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE, BASE, base); - WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE, data); - - base = upper_32_bits(adev->gfx.ngg.buf[NGG_CNTL].gpu_addr); - data = REG_SET_FIELD(0, WD_CNTL_SB_BUF_BASE_HI, BASE_HI, base); - WREG32_SOC15(GC, 0, mmWD_CNTL_SB_BUF_BASE_HI, data); - - /* Clear GDS reserved memory */ - r = amdgpu_ring_alloc(ring, 17); - if (r) { - DRM_ERROR("amdgpu: NGG failed to lock ring %s (%d).\n", - ring->name, r); - return r; - } - - gfx_v9_0_write_data_to_reg(ring, 0, false, - SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), - (adev->gds.gds_size + - adev->gfx.ngg.gds_reserve_size)); - - amdgpu_ring_write(ring, PACKET3(PACKET3_DMA_DATA, 5)); - amdgpu_ring_write(ring, (PACKET3_DMA_DATA_CP_SYNC | - PACKET3_DMA_DATA_DST_SEL(1) | - PACKET3_DMA_DATA_SRC_SEL(2))); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, adev->gfx.ngg.gds_reserve_addr); - amdgpu_ring_write(ring, 0); - amdgpu_ring_write(ring, PACKET3_DMA_DATA_CMD_RAW_WAIT | - adev->gfx.ngg.gds_reserve_size); - - gfx_v9_0_write_data_to_reg(ring, 0, false, - SOC15_REG_OFFSET(GC, 0, mmGDS_VMID0_SIZE), 0); - - amdgpu_ring_commit(ring); - - return 0; -} - static int gfx_v9_0_compute_ring_init(struct amdgpu_device *adev, int ring_id, int mec, int pipe, int queue) { @@ -2325,10 +2156,6 @@ static int gfx_v9_0_sw_init(void *handle) if (r) return r; - r = gfx_v9_0_ngg_init(adev); - if (r) - return r; - return 0; } @@ -2338,19 +2165,7 @@ static int gfx_v9_0_sw_fini(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) && - adev->gfx.ras_if) { - struct ras_common_if *ras_if = adev->gfx.ras_if; - struct ras_ih_if ih_info = { - .head = *ras_if, - }; - - amdgpu_ras_debugfs_remove(adev, ras_if); - amdgpu_ras_sysfs_remove(adev, ras_if); - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); - amdgpu_ras_feature_enable(adev, ras_if, 0); - kfree(ras_if); - } + amdgpu_gfx_ras_fini(adev); for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -2358,11 +2173,10 @@ static int gfx_v9_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); amdgpu_gfx_mqd_sw_fini(adev); - amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); + amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq.ring); amdgpu_gfx_kiq_fini(adev); gfx_v9_0_mec_fini(adev); - gfx_v9_0_ngg_fini(adev); amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); if (adev->asic_type == CHIP_RAVEN || adev->asic_type == CHIP_RENOIR) { amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, @@ -2930,7 +2744,10 @@ static void gfx_v9_0_init_pg(struct amdgpu_device *adev) * And it's needed by gfxoff feature. 
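The gfx_v9_0_sw_fini() hunk above folds the per-IP RAS teardown into a shared amdgpu_gfx_ras_fini() call instead of open-coding it in each GFX version. Roughly, that helper has to perform the same sequence being deleted here; the sketch below is reconstructed from the removed block (the function name is only illustrative, the real helper lives with the other amdgpu_gfx_* helpers):

/*
 * Sketch of the teardown that gfx_v9_0_sw_fini() used to open-code
 * and now delegates to amdgpu_gfx_ras_fini(); it only mirrors the
 * calls removed in the hunk above.
 */
static void gfx_ras_fini_sketch(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
	    adev->gfx.ras_if) {
		struct ras_common_if *ras_if = adev->gfx.ras_if;
		struct ras_ih_if ih_info = {
			.head = *ras_if,
		};

		/* unpublish the error counters, then disable the feature */
		amdgpu_ras_debugfs_remove(adev, ras_if);
		amdgpu_ras_sysfs_remove(adev, ras_if);
		amdgpu_ras_interrupt_remove_handler(adev, &ih_info);
		amdgpu_ras_feature_enable(adev, ras_if, 0);
		kfree(ras_if);
	}
}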
*/ if (adev->gfx.rlc.is_rlc_v2_1) { - gfx_v9_1_init_rlc_save_restore_list(adev); + if (adev->asic_type == CHIP_VEGA12 || + (adev->asic_type == CHIP_RAVEN && + adev->rev_id >= 8)) + gfx_v9_1_init_rlc_save_restore_list(adev); gfx_v9_0_enable_save_restore_machine(adev); } @@ -3901,12 +3718,6 @@ static int gfx_v9_0_hw_init(void *handle) if (r) return r; - if (adev->asic_type != CHIP_ARCTURUS) { - r = gfx_v9_0_ngg_en(adev); - if (r) - return r; - } - return r; } @@ -3948,8 +3759,10 @@ static int gfx_v9_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); - /* disable KCQ to avoid CPC touch memory not valid anymore */ - gfx_v9_0_kcq_disable(adev); + /* DF freeze and kcq disable will fail */ + if (!amdgpu_ras_intr_triggered()) + /* disable KCQ to avoid CPC touch memory not valid anymore */ + gfx_v9_0_kcq_disable(adev); if (amdgpu_sriov_vf(adev)) { gfx_v9_0_cp_gfx_enable(adev, false); @@ -4085,9 +3898,22 @@ static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev) uint64_t clock; mutex_lock(&adev->gfx.gpu_clock_mutex); - WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); - clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | - ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) { + uint32_t tmp, lsb, msb, i = 0; + do { + if (i != 0) + udelay(1); + tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); + lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB); + msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB); + i++; + } while (unlikely(tmp != msb) && (i < adev->usec_timeout)); + clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL); + } else { + WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1); + clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) | + ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL); + } mutex_unlock(&adev->gfx.gpu_clock_mutex); return clock; } @@ -4202,6 +4028,7 @@ static const struct soc15_reg_entry sec_ded_counter_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 1, 16}, { SOC15_REG_ENTRY(GC, 0, mmTCP_ATC_EDC_GATCL1_CNT), 0, 4, 16}, { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT), 0, 4, 16}, + { SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 0, 4, 16}, { SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 0, 4, 16}, { SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 0, 4, 6}, { SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 0, 4, 16}, @@ -4221,6 +4048,10 @@ static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev) struct amdgpu_ring *ring = &adev->gfx.compute_ring[0]; int i, r; + /* only support when RAS is enabled */ + if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + return 0; + r = amdgpu_ring_alloc(ring, 7); if (r) { DRM_ERROR("amdgpu: GDS workarounds failed to lock ring %s (%d).\n", @@ -4411,33 +4242,14 @@ static int gfx_v9_0_early_init(void *handle) return 0; } -static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, - struct amdgpu_iv_entry *entry); - static int gfx_v9_0_ecc_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ras_common_if **ras_if = &adev->gfx.ras_if; - struct ras_ih_if ih_info = { - .cb = gfx_v9_0_process_ras_data_cb, - }; - struct ras_fs_if fs_info = { - .sysfs_name = "gfx_err_count", - .debugfs_name = "gfx_err_inject", - }; - struct ras_common_if ras_block = { - .block = AMDGPU_RAS_BLOCK__GFX, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - 
.sub_block_index = 0, - .name = "gfx", - }; int r; - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { - amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0); - return 0; - } + r = amdgpu_gfx_ras_late_init(adev); + if (r) + return r; r = gfx_v9_0_do_edc_gds_workarounds(adev); if (r) @@ -4448,72 +4260,7 @@ static int gfx_v9_0_ecc_late_init(void *handle) if (r) return r; - /* handle resume path. */ - if (*ras_if) { - /* resend ras TA enable cmd during resume. - * prepare to handle failure. - */ - ih_info.head = **ras_if; - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - /* request a gpu reset. will run again. */ - amdgpu_ras_request_reset_on_boot(adev, - AMDGPU_RAS_BLOCK__GFX); - return 0; - } - /* fail to enable ras, cleanup all. */ - goto irq; - } - /* enable successfully. continue. */ - goto resume; - } - - *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL); - if (!*ras_if) - return -ENOMEM; - - **ras_if = ras_block; - - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - amdgpu_ras_request_reset_on_boot(adev, - AMDGPU_RAS_BLOCK__GFX); - r = 0; - } - goto feature; - } - - ih_info.head = **ras_if; - fs_info.head = **ras_if; - - r = amdgpu_ras_interrupt_add_handler(adev, &ih_info); - if (r) - goto interrupt; - - amdgpu_ras_debugfs_create(adev, &fs_info); - - r = amdgpu_ras_sysfs_create(adev, &fs_info); - if (r) - goto sysfs; -resume: - r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); - if (r) - goto irq; - return 0; -irq: - amdgpu_ras_sysfs_remove(adev, *ras_if); -sysfs: - amdgpu_ras_debugfs_remove(adev, *ras_if); - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); -interrupt: - amdgpu_ras_feature_enable(adev, *ras_if, 0); -feature: - kfree(*ras_if); - *ras_if = NULL; - return r; } static int gfx_v9_0_late_init(void *handle) @@ -4578,16 +4325,14 @@ static void gfx_v9_0_update_gfx_cg_power_gating(struct amdgpu_device *adev, { amdgpu_gfx_rlc_enter_safe_mode(adev); - if (is_support_sw_smu(adev) && !enable) - smu_set_gfx_cgpg(&adev->smu, enable); - if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { gfx_v9_0_enable_gfx_cg_power_gating(adev, true); if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) gfx_v9_0_enable_gfx_pipeline_powergating(adev, true); } else { gfx_v9_0_enable_gfx_cg_power_gating(adev, false); - gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) + gfx_v9_0_enable_gfx_pipeline_powergating(adev, false); } amdgpu_gfx_rlc_exit_safe_mode(adev); @@ -4856,8 +4601,6 @@ static int gfx_v9_0_set_powergating_state(void *handle, gfx_v9_0_enable_cp_power_gating(adev, false); /* update gfx cgpg state */ - if (is_support_sw_smu(adev) && enable) - smu_set_gfx_cgpg(&adev->smu, enable); gfx_v9_0_update_gfx_cg_power_gating(adev, enable); /* update mgcg state */ @@ -4988,7 +4731,7 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 ref_and_mask, reg_mem_engine; - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { switch (ring->me) { @@ -5008,8 +4751,8 @@ static void gfx_v9_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) } gfx_v9_0_wait_reg_mem(ring, reg_mem_engine, 0, 1, - adev->nbio_funcs->get_hdp_flush_req_offset(adev), - adev->nbio_funcs->get_hdp_flush_done_offset(adev), + 
adev->nbio.funcs->get_hdp_flush_req_offset(adev), + adev->nbio.funcs->get_hdp_flush_done_offset(adev), ref_and_mask, ref_and_mask, 0x20); } @@ -5741,313 +5484,446 @@ static int gfx_v9_0_priv_inst_irq(struct amdgpu_device *adev, return 0; } -static int gfx_v9_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, - struct amdgpu_iv_entry *entry) -{ - /* TODO ue will trigger an interrupt. */ - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (adev->gfx.funcs->query_ras_error_count) - adev->gfx.funcs->query_ras_error_count(adev, err_data); - amdgpu_ras_reset_gpu(adev, 0); - return AMDGPU_RAS_SUCCESS; -} -static const struct { - const char *name; - uint32_t ip; - uint32_t inst; - uint32_t seg; - uint32_t reg_offset; - uint32_t per_se_instance; - int32_t num_instance; - uint32_t sec_count_mask; - uint32_t ded_count_mask; -} gfx_ras_edc_regs[] = { - { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), 0, 1, - REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, SEC_COUNT), - REG_FIELD_MASK(CPC_EDC_SCRATCH_CNT, DED_COUNT) }, - { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), 0, 1, - REG_FIELD_MASK(CPC_EDC_UCODE_CNT, SEC_COUNT), - REG_FIELD_MASK(CPC_EDC_UCODE_CNT, DED_COUNT) }, - { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, - REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME1), 0 }, - { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), 0, 1, - REG_FIELD_MASK(CPF_EDC_ROQ_CNT, COUNT_ME2), 0 }, - { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), 0, 1, - REG_FIELD_MASK(CPF_EDC_TAG_CNT, SEC_COUNT), - REG_FIELD_MASK(CPF_EDC_TAG_CNT, DED_COUNT) }, - { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, - REG_FIELD_MASK(CPG_EDC_DMA_CNT, ROQ_COUNT), 0 }, - { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), 0, 1, - REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_SEC_COUNT), - REG_FIELD_MASK(CPG_EDC_DMA_CNT, TAG_DED_COUNT) }, - { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), 0, 1, - REG_FIELD_MASK(CPG_EDC_TAG_CNT, SEC_COUNT), - REG_FIELD_MASK(CPG_EDC_TAG_CNT, DED_COUNT) }, - { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), 0, 1, - REG_FIELD_MASK(DC_EDC_CSINVOC_CNT, COUNT_ME1), 0 }, - { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), 0, 1, - REG_FIELD_MASK(DC_EDC_RESTORE_CNT, COUNT_ME1), 0 }, - { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), 0, 1, - REG_FIELD_MASK(DC_EDC_STATE_CNT, COUNT_ME1), 0 }, - { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_CNT, GDS_MEM_DED) }, - { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), 0 }, +static const struct ras_gfx_subblock_reg ras_subblock_regs[] = { + { "CPC_SCRATCH", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_SCRATCH_CNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_SCRATCH_CNT, DED_COUNT) + }, + { "CPC_UCODE", SOC15_REG_ENTRY(GC, 0, mmCPC_EDC_UCODE_CNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPC_EDC_UCODE_CNT, DED_COUNT) + }, + { "CPF_ROQ_ME1", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME1), + 0, 0 + }, + { "CPF_ROQ_ME2", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_ROQ_CNT), + SOC15_REG_FIELD(CPF_EDC_ROQ_CNT, COUNT_ME2), + 0, 0 + }, + { "CPF_TAG", SOC15_REG_ENTRY(GC, 0, mmCPF_EDC_TAG_CNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPF_EDC_TAG_CNT, DED_COUNT) + }, + { "CPG_DMA_ROQ", SOC15_REG_ENTRY(GC, 0, 
mmCPG_EDC_DMA_CNT), + SOC15_REG_FIELD(CPG_EDC_DMA_CNT, ROQ_COUNT), + 0, 0 + }, + { "CPG_DMA_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_DMA_CNT), + SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_SEC_COUNT), + SOC15_REG_FIELD(CPG_EDC_DMA_CNT, TAG_DED_COUNT) + }, + { "CPG_TAG", SOC15_REG_ENTRY(GC, 0, mmCPG_EDC_TAG_CNT), + SOC15_REG_FIELD(CPG_EDC_TAG_CNT, SEC_COUNT), + SOC15_REG_FIELD(CPG_EDC_TAG_CNT, DED_COUNT) + }, + { "DC_CSINVOC", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_CSINVOC_CNT), + SOC15_REG_FIELD(DC_EDC_CSINVOC_CNT, COUNT_ME1), + 0, 0 + }, + { "DC_RESTORE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_RESTORE_CNT), + SOC15_REG_FIELD(DC_EDC_RESTORE_CNT, COUNT_ME1), + 0, 0 + }, + { "DC_STATE", SOC15_REG_ENTRY(GC, 0, mmDC_EDC_STATE_CNT), + SOC15_REG_FIELD(DC_EDC_STATE_CNT, COUNT_ME1), + 0, 0 + }, + { "GDS_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_MEM_DED) + }, + { "GDS_INPUT_QUEUE", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_CNT), + SOC15_REG_FIELD(GDS_EDC_CNT, GDS_INPUT_QUEUE_SED), + 0, 0 + }, { "GDS_ME0_CS_PIPE_MEM", SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), - 0, 1, REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) }, + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, ME0_CS_PIPE_MEM_DED) + }, { "GDS_OA_PHY_PHY_CMD_RAM_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_CMD_RAM_MEM_DED) + }, { "GDS_OA_PHY_PHY_DATA_RAM_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), 0 }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PHY_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PHY_CNT, PHY_DATA_RAM_MEM_SED), + 0, 0 + }, { "GDS_OA_PIPE_ME1_PIPE0_PIPE_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE0_PIPE_MEM_DED) + }, { "GDS_OA_PIPE_ME1_PIPE1_PIPE_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE1_PIPE_MEM_DED) + }, { "GDS_OA_PIPE_ME1_PIPE2_PIPE_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE2_PIPE_MEM_DED) + }, { "GDS_OA_PIPE_ME1_PIPE3_PIPE_MEM", - SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), 0, 1, - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), - REG_FIELD_MASK(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) }, - { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), 1, 1, - REG_FIELD_MASK(SPI_EDC_CNT, 
SPI_SR_MEM_SED_COUNT), 0 }, - { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), - REG_FIELD_MASK(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) }, - { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), 0 }, - { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), 0 }, - { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), 0 }, - { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), 1, 16, - REG_FIELD_MASK(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), 0 }, - { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2, - REG_FIELD_MASK(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), 0 }, - { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 2, - REG_FIELD_MASK(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), 0 }, - { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) }, - { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) }, - { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) }, - { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) }, - { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), - REG_FIELD_MASK(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) }, - { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), 0 }, - { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), 0 }, - { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), 0 }, - { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), 0 }, - { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), 0 }, - { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), 0 }, - { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), 0 }, - { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 16, - REG_FIELD_MASK(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), 0 }, - { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, - 16, REG_FIELD_MASK(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), 0 }, + SOC15_REG_ENTRY(GC, 0, mmGDS_EDC_OA_PIPE_CNT), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_SEC), + SOC15_REG_FIELD(GDS_EDC_OA_PIPE_CNT, ME1_PIPE3_PIPE_MEM_DED) + }, + { "SPI_SR_MEM", SOC15_REG_ENTRY(GC, 0, mmSPI_EDC_CNT), + SOC15_REG_FIELD(SPI_EDC_CNT, SPI_SR_MEM_SED_COUNT), + 0, 0 + }, + { "TA_FS_DFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_SEC_COUNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_DFIFO_DED_COUNT) + }, + { "TA_FS_AFIFO", SOC15_REG_ENTRY(GC, 0, 
mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_AFIFO_SED_COUNT), + 0, 0 + }, + { "TA_FL_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FL_LFIFO_SED_COUNT), + 0, 0 + }, + { "TA_FX_LFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FX_LFIFO_SED_COUNT), + 0, 0 + }, + { "TA_FS_CFIFO", SOC15_REG_ENTRY(GC, 0, mmTA_EDC_CNT), + SOC15_REG_FIELD(TA_EDC_CNT, TA_FS_CFIFO_SED_COUNT), + 0, 0 + }, + { "TCA_HOLE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, HOLE_FIFO_SED_COUNT), + 0, 0 + }, + { "TCA_REQ_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), + SOC15_REG_FIELD(TCA_EDC_CNT, REQ_FIFO_SED_COUNT), + 0, 0 + }, + { "TCC_CACHE_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DATA_DED_COUNT) + }, + { "TCC_CACHE_DIRTY", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, CACHE_DIRTY_DED_COUNT) + }, + { "TCC_HIGH_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, HIGH_RATE_TAG_DED_COUNT) + }, + { "TCC_LOW_RATE_TAG", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LOW_RATE_TAG_DED_COUNT) + }, + { "TCC_SRC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_SEC_COUNT), + SOC15_REG_FIELD(TCC_EDC_CNT, SRC_FIFO_DED_COUNT) + }, + { "TCC_IN_USE_DEC", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_DEC_SED_COUNT), + 0, 0 + }, + { "TCC_IN_USE_TRANSFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, IN_USE_TRANSFER_SED_COUNT), + 0, 0 + }, + { "TCC_LATENCY_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, LATENCY_FIFO_SED_COUNT), + 0, 0 + }, + { "TCC_RETURN_DATA", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_DATA_SED_COUNT), + 0, 0 + }, + { "TCC_RETURN_CONTROL", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, RETURN_CONTROL_SED_COUNT), + 0, 0 + }, + { "TCC_UC_ATOMIC_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT), + SOC15_REG_FIELD(TCC_EDC_CNT, UC_ATOMIC_FIFO_SED_COUNT), + 0, 0 + }, + { "TCC_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_RETURN_SED_COUNT), + 0, 0 + }, + { "TCC_WRITE_CACHE_READ", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, WRITE_CACHE_READ_SED_COUNT), + 0, 0 + }, + { "TCC_SRC_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, SRC_FIFO_NEXT_RAM_SED_COUNT), + 0, 0 + }, { "TCC_LATENCY_FIFO_NEXT_RAM", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), - 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT), - 0 }, - { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, - 16, REG_FIELD_MASK(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), 0 }, + SOC15_REG_FIELD(TCC_EDC_CNT2, LATENCY_FIFO_NEXT_RAM_SED_COUNT), + 0, 0 + }, + { "TCC_CACHE_TAG_PROBE_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, CACHE_TAG_PROBE_FIFO_SED_COUNT), + 0, 0 + }, { "TCC_WRRET_TAG_WRITE_RETURN", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), - 0, 16, REG_FIELD_MASK(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT), - 0 }, - { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, - 16, 
REG_FIELD_MASK(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT), 0 }, - { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), 0, 72, - REG_FIELD_MASK(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), 0 }, - { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), - REG_FIELD_MASK(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) }, - { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), - REG_FIELD_MASK(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) }, - { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), 0 }, - { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), 0 }, - { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), 0 }, - { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), - REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) }, - { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), 1, 16, - REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), - REG_FIELD_MASK(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) }, - { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16, - REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), - REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) }, - { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16, - REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), - REG_FIELD_MASK(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) }, - { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), 1, 16, - REG_FIELD_MASK(TD_EDC_CNT, CS_FIFO_SED_COUNT), 0 }, - { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, LDS_D_DED_COUNT) }, - { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, LDS_I_DED_COUNT) }, - { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, SGPR_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, SGPR_DED_COUNT) }, - { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, VGPR0_DED_COUNT) }, - { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, VGPR1_DED_COUNT) }, - { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, VGPR2_DED_COUNT) }, - { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), 1, 16, - REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_SEC_COUNT), - REG_FIELD_MASK(SQ_EDC_CNT, VGPR3_DED_COUNT) }, + SOC15_REG_FIELD(TCC_EDC_CNT2, WRRET_TAG_WRITE_RETURN_SED_COUNT), + 0, 0 + }, + { "TCC_ATOMIC_RETURN_BUFFER", SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), + SOC15_REG_FIELD(TCC_EDC_CNT2, ATOMIC_RETURN_BUFFER_SED_COUNT), + 0, 0 + }, + { "TCI_WRITE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCI_EDC_CNT), + SOC15_REG_FIELD(TCI_EDC_CNT, WRITE_RAM_SED_COUNT), + 0, 0 + }, + { "TCP_CACHE_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CACHE_RAM_DED_COUNT) + }, + { "TCP_LFIFO_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_SEC_COUNT), + 
SOC15_REG_FIELD(TCP_EDC_CNT_NEW, LFIFO_RAM_DED_COUNT) + }, + { "TCP_CMD_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, CMD_FIFO_SED_COUNT), + 0, 0 + }, + { "TCP_VM_FIFO", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, VM_FIFO_SEC_COUNT), + 0, 0 + }, + { "TCP_DB_RAM", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, DB_RAM_SED_COUNT), + 0, 0 + }, + { "TCP_UTCL1_LFIFO0", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO0_DED_COUNT) + }, + { "TCP_UTCL1_LFIFO1", SOC15_REG_ENTRY(GC, 0, mmTCP_EDC_CNT_NEW), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_SEC_COUNT), + SOC15_REG_FIELD(TCP_EDC_CNT_NEW, UTCL1_LFIFO1_DED_COUNT) + }, + { "TD_SS_FIFO_LO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_LO_DED_COUNT) + }, + { "TD_SS_FIFO_HI", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_SEC_COUNT), + SOC15_REG_FIELD(TD_EDC_CNT, SS_FIFO_HI_DED_COUNT) + }, + { "TD_CS_FIFO", SOC15_REG_ENTRY(GC, 0, mmTD_EDC_CNT), + SOC15_REG_FIELD(TD_EDC_CNT, CS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQ_LDS_D", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_D_DED_COUNT) + }, + { "SQ_LDS_I", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, LDS_I_DED_COUNT) + }, + { "SQ_SGPR", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, SGPR_DED_COUNT) + }, + { "SQ_VGPR0", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR0_DED_COUNT) + }, + { "SQ_VGPR1", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR1_DED_COUNT) + }, + { "SQ_VGPR2", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR2_DED_COUNT) + }, + { "SQ_VGPR3", SOC15_REG_ENTRY(GC, 0, mmSQ_EDC_CNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_SEC_COUNT), + SOC15_REG_FIELD(SQ_EDC_CNT, VGPR3_DED_COUNT) + }, { "SQC_DATA_CU0_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), - 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) }, - { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) }, + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_WRITE_DATA_BUF_DED_COUNT) + }, + { "SQC_DATA_CU0_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU0_UTCL1_LFIFO_DED_COUNT) + }, { "SQC_DATA_CU1_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), - 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) }, - { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) }, + 
SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_WRITE_DATA_BUF_DED_COUNT) + }, + { "SQC_DATA_CU1_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU1_UTCL1_LFIFO_DED_COUNT) + }, { "SQC_DATA_CU2_WRITE_DATA_BUF", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), - 1, 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_DED_COUNT) }, - { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) }, - { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) }, - { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) }, - { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) }, - { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) }, - { "SQC_INST_BANKA_UTCL1_MISS_FIFO", - SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT), - 0 }, - { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKA_DIRTY_BIT_RAM", - SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), 0 }, - { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) }, - { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) }, - { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) }, - { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) }, - { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), - REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) }, - { "SQC_INST_BANKB_UTCL1_MISS_FIFO", - SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT), - 0 }, - { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 
0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, - 6, REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), 0 }, - { "SQC_DATA_BANKB_DIRTY_BIT_RAM", - SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 1, 6, - REG_FIELD_MASK(SQC_EDC_CNT3, DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), 0 }, - { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) }, - { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) }, - { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) }, - { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) }, - { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) }, - { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), 0 }, - { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), 0 }, - { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), 0 }, - { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), 0 }, - { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), 0 }, - { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) }, - { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) }, - { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) }, - { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), 0 }, - { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), 0 }, - { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), 0 }, - { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), 0 }, - { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), 0 }, - { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), 0, 32, - REG_FIELD_MASK(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), 0 }, + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_WRITE_DATA_BUF_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, 
DATA_CU2_WRITE_DATA_BUF_DED_COUNT) + }, + { "SQC_DATA_CU2_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT, DATA_CU2_UTCL1_LFIFO_DED_COUNT) + }, + { "SQC_INST_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_TAG_RAM_DED_COUNT) + }, + { "SQC_INST_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_BANK_RAM_DED_COUNT) + }, + { "SQC_DATA_BANKA_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_TAG_RAM_DED_COUNT) + }, + { "SQC_DATA_BANKA_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_BANK_RAM_DED_COUNT) + }, + { "SQC_INST_BANKA_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_UTCL1_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_INST_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_BANKA_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKA_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_HIT_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKA_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKA_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, DATA_BANKA_DIRTY_BIT_RAM_SED_COUNT), + 0, 0 + }, + { "SQC_INST_UTCL1_LFIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT2), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT2, INST_UTCL1_LFIFO_DED_COUNT) + }, + { "SQC_INST_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_TAG_RAM_DED_COUNT) + }, + { "SQC_INST_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_BANK_RAM_DED_COUNT) + }, + { "SQC_DATA_BANKB_TAG_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_TAG_RAM_DED_COUNT) + }, + { "SQC_DATA_BANKB_BANK_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_SEC_COUNT), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_BANK_RAM_DED_COUNT) + }, + { "SQC_INST_BANKB_UTCL1_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_UTCL1_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_INST_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, INST_BANKB_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKB_HIT_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_HIT_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKB_MISS_FIFO", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, DATA_BANKB_MISS_FIFO_SED_COUNT), + 0, 0 + }, + { "SQC_DATA_BANKB_DIRTY_BIT_RAM", SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), + SOC15_REG_FIELD(SQC_EDC_CNT3, 
DATA_BANKB_DIRTY_BIT_RAM_SED_COUNT), + 0, 0 + }, + { "EA_DRAMRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_CMDMEM_DED_COUNT) + }, + { "EA_DRAMWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_CMDMEM_DED_COUNT) + }, + { "EA_DRAMWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_DATAMEM_DED_COUNT) + }, + { "EA_RRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, RRET_TAGMEM_DED_COUNT) + }, + { "EA_WRET_TAGMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, WRET_TAGMEM_DED_COUNT) + }, + { "EA_DRAMRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMRD_PAGEMEM_SED_COUNT), + 0, 0 + }, + { "EA_DRAMWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, DRAMWR_PAGEMEM_SED_COUNT), + 0, 0 + }, + { "EA_IORD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IORD_CMDMEM_SED_COUNT), + 0, 0 + }, + { "EA_IOWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_CMDMEM_SED_COUNT), + 0, 0 + }, + { "EA_IOWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT), + SOC15_REG_FIELD(GCEA_EDC_CNT, IOWR_DATAMEM_SED_COUNT), + 0, 0 + }, + { "GMIRD_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_CMDMEM_DED_COUNT) + }, + { "GMIWR_CMDMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_CMDMEM_DED_COUNT) + }, + { "GMIWR_DATAMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_SEC_COUNT), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_DATAMEM_DED_COUNT) + }, + { "GMIRD_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIRD_PAGEMEM_SED_COUNT), + 0, 0 + }, + { "GMIWR_PAGEMEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, GMIWR_PAGEMEM_SED_COUNT), + 0, 0 + }, + { "MAM_D0MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D0MEM_SED_COUNT), + 0, 0 + }, + { "MAM_D1MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D1MEM_SED_COUNT), + 0, 0 + }, + { "MAM_D2MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D2MEM_SED_COUNT), + 0, 0 + }, + { "MAM_D3MEM", SOC15_REG_ENTRY(GC, 0, mmGCEA_EDC_CNT2), + SOC15_REG_FIELD(GCEA_EDC_CNT2, MAM_D3MEM_SED_COUNT), + 0, 0 + } }; static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, @@ -6096,14 +5972,217 @@ static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, return ret; } +static const char *vml2_mems[] = { + "UTC_VML2_BANK_CACHE_0_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_0_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_0_4K_MEM0", + "UTC_VML2_BANK_CACHE_0_4K_MEM1", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_1_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_1_4K_MEM0", + "UTC_VML2_BANK_CACHE_1_4K_MEM1", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_2_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_2_4K_MEM0", + 
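For reference, the reworked table above replaces the old raw-mask-plus-instance-count entries with explicit SEC/DED field descriptors per counter register. The following standalone sketch shows how such an entry could be declared and decoded; the struct layout, the assumption that SOC15_REG_FIELD expands to a {mask, shift} pair, and the GDS field values are illustrative stand-ins, not the exact kernel definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed shape of one EDC counter entry: a name plus SEC/DED mask+shift
 * pairs. The real soc15_reg_entry / SOC15_REG_FIELD definitions may differ. */
struct edc_entry {
	const char *name;
	uint32_t sec_mask, sec_shift;
	uint32_t ded_mask, ded_shift;
};

/* Invented stand-ins for the GDS_EDC_CNT field macros. */
#define GDS_MEM_SEC_MASK  0x000000ffu
#define GDS_MEM_SEC_SHIFT 0
#define GDS_MEM_DED_MASK  0x0000ff00u
#define GDS_MEM_DED_SHIFT 8

static const struct edc_entry gds_mem = {
	"GDS_MEM",
	GDS_MEM_SEC_MASK, GDS_MEM_SEC_SHIFT,
	GDS_MEM_DED_MASK, GDS_MEM_DED_SHIFT,
};

/* Decode one raw counter value the way __get_ras_error_count does:
 * mask the field, then shift it down to get the error count. */
static void decode_edc(const struct edc_entry *e, uint32_t reg_value)
{
	uint32_t sec = (reg_value & e->sec_mask) >> e->sec_shift;
	uint32_t ded = (reg_value & e->ded_mask) >> e->ded_shift;

	if (sec)
		printf("%s: %u correctable (SEC) errors\n", e->name, (unsigned)sec);
	if (ded)
		printf("%s: %u uncorrectable (DED) errors\n", e->name, (unsigned)ded);
}

int main(void)
{
	decode_edc(&gds_mem, 0x0203);	/* 3 SEC, 2 DED in the assumed layout */
	return 0;
}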
"UTC_VML2_BANK_CACHE_2_4K_MEM1", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM0", + "UTC_VML2_BANK_CACHE_3_BIGK_MEM1", + "UTC_VML2_BANK_CACHE_3_4K_MEM0", + "UTC_VML2_BANK_CACHE_3_4K_MEM1", +}; + +static const char *vml2_walker_mems[] = { + "UTC_VML2_CACHE_PDE0_MEM0", + "UTC_VML2_CACHE_PDE0_MEM1", + "UTC_VML2_CACHE_PDE1_MEM0", + "UTC_VML2_CACHE_PDE1_MEM1", + "UTC_VML2_CACHE_PDE2_MEM0", + "UTC_VML2_CACHE_PDE2_MEM1", + "UTC_VML2_RDIF_LOG_FIFO", +}; + +static const char *atc_l2_cache_2m_mems[] = { + "UTC_ATCL2_CACHE_2M_BANK0_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK0_WAY1_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY0_MEM", + "UTC_ATCL2_CACHE_2M_BANK1_WAY1_MEM", +}; + +static const char *atc_l2_cache_4k_mems[] = { + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK0_WAY1_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY0_MEM7", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM0", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM1", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM2", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM3", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM4", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM5", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM6", + "UTC_ATCL2_CACHE_4K_BANK1_WAY1_MEM7", +}; + +static int gfx_v9_0_query_utc_edc_status(struct amdgpu_device *adev, + struct ras_err_data *err_data) +{ + uint32_t i, data; + uint32_t sec_count, ded_count; + + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT, 0); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT, 0); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT, 0); + + for (i = 0; i < 16; i++) { + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_CNT); + + sec_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VM_L2_MEM_ECC_CNT, DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < 7; i++) { + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_CNT); + + sec_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT, + SEC_COUNT); + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + vml2_walker_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = REG_GET_FIELD(data, VM_L2_WALKER_MEM_ECC_CNT, + 
DED_COUNT); + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + vml2_walker_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + for (i = 0; i < 4; i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_CNT); + + sec_count = (data & 0x00006000L) >> 0xd; + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_2m_mems[i], sec_count); + err_data->ce_count += sec_count; + } + } + + for (i = 0; i < 32; i++) { + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, i); + data = RREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_CNT); + + sec_count = (data & 0x00006000L) >> 0xd; + if (sec_count) { + DRM_INFO("Instance[%d]: SubBlock %s, SEC %d\n", i, + atc_l2_cache_4k_mems[i], sec_count); + err_data->ce_count += sec_count; + } + + ded_count = (data & 0x00018000L) >> 0xf; + if (ded_count) { + DRM_INFO("Instance[%d]: SubBlock %s, DED %d\n", i, + atc_l2_cache_4k_mems[i], ded_count); + err_data->ue_count += ded_count; + } + } + + WREG32_SOC15(GC, 0, mmVM_L2_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmVM_L2_WALKER_MEM_ECC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_2M_EDC_INDEX, 255); + WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); + + return 0; +} + +static int __get_ras_error_count(const struct soc15_reg_entry *reg, + uint32_t se_id, uint32_t inst_id, uint32_t value, + uint32_t *sec_count, uint32_t *ded_count) +{ + uint32_t i; + uint32_t sec_cnt, ded_cnt; + + for (i = 0; i < ARRAY_SIZE(ras_subblock_regs); i++) { + if(ras_subblock_regs[i].reg_offset != reg->reg_offset || + ras_subblock_regs[i].seg != reg->seg || + ras_subblock_regs[i].inst != reg->inst) + continue; + + sec_cnt = (value & + ras_subblock_regs[i].sec_count_mask) >> + ras_subblock_regs[i].sec_count_shift; + if (sec_cnt) { + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], SEC %d\n", + ras_subblock_regs[i].name, + se_id, inst_id, + sec_cnt); + *sec_count += sec_cnt; + } + + ded_cnt = (value & + ras_subblock_regs[i].ded_count_mask) >> + ras_subblock_regs[i].ded_count_shift; + if (ded_cnt) { + DRM_INFO("GFX SubBlock %s, Instance[%d][%d], DED %d\n", + ras_subblock_regs[i].name, + se_id, inst_id, + ded_cnt); + *ded_count += ded_cnt; + } + } + + return 0; +} + static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; - uint32_t sec_count, ded_count; - uint32_t i; + uint32_t sec_count = 0, ded_count = 0; + uint32_t i, j, k; uint32_t reg_value; - uint32_t se_id, instance_id; if (adev->asic_type != CHIP_VEGA20) return -EINVAL; @@ -6112,71 +6191,29 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, err_data->ce_count = 0; mutex_lock(&adev->grbm_idx_mutex); - for (se_id = 0; se_id < adev->gfx.config.max_shader_engines; se_id++) { - for (instance_id = 0; instance_id < 256; instance_id++) { - for (i = 0; - i < sizeof(gfx_ras_edc_regs) / sizeof(gfx_ras_edc_regs[0]); - i++) { - if (se_id != 0 && - !gfx_ras_edc_regs[i].per_se_instance) - continue; - if (instance_id >= gfx_ras_edc_regs[i].num_instance) - continue; - - gfx_v9_0_select_se_sh(adev, se_id, 0, - instance_id); - - reg_value = RREG32( - adev->reg_offset[gfx_ras_edc_regs[i].ip] - [gfx_ras_edc_regs[i].inst] - [gfx_ras_edc_regs[i].seg] + - gfx_ras_edc_regs[i].reg_offset); - sec_count = reg_value & - gfx_ras_edc_regs[i].sec_count_mask; - ded_count = reg_value & - gfx_ras_edc_regs[i].ded_count_mask; - if (sec_count) { - DRM_INFO( - 
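The UTC/ATC loops above follow an index/count register pair protocol: write the instance number into the *_INDEX register, read the matching *_CNT register, then extract the SEC/DED fields (the ATC 4K path does this with literal masks, where bits 14:13 hold SEC and bits 16:15 hold DED). Below is a hedged, standalone sketch of that walk; the register accessors, register numbers, and field layout are placeholders, not the SOC15 helpers used in the driver.

#include <stdint.h>
#include <stdio.h>

/* Fake register file standing in for WREG32_SOC15/RREG32_SOC15. */
static uint32_t fake_regs[2];
#define REG_EDC_INDEX 0
#define REG_EDC_CNT   1

static void wreg32(uint32_t reg, uint32_t val) { fake_regs[reg] = val; }
static uint32_t rreg32(uint32_t reg) { return fake_regs[reg]; }

/* Field layout assumed by the ATC_L2_CACHE_4K path: SEC in bits 14:13,
 * DED in bits 16:15. */
#define SEC_MASK  0x00006000u
#define SEC_SHIFT 13
#define DED_MASK  0x00018000u
#define DED_SHIFT 15

static void query_edc(unsigned int num_instances,
		      unsigned int *ce_count, unsigned int *ue_count)
{
	unsigned int i;

	for (i = 0; i < num_instances; i++) {
		uint32_t data;

		wreg32(REG_EDC_INDEX, i);	/* select instance i */
		data = rreg32(REG_EDC_CNT);	/* read its error counters */

		*ce_count += (data & SEC_MASK) >> SEC_SHIFT;
		*ue_count += (data & DED_MASK) >> DED_SHIFT;
	}
	wreg32(REG_EDC_INDEX, 255);		/* restore broadcast index */
}

int main(void)
{
	unsigned int ce = 0, ue = 0;

	fake_regs[REG_EDC_CNT] = 1u << SEC_SHIFT;	/* pretend one SEC error */
	query_edc(4, &ce, &ue);
	printf("ce=%u ue=%u\n", ce, ue);
	return 0;
}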
"Instance[%d][%d]: SubBlock %s, SEC %d\n", - se_id, instance_id, - gfx_ras_edc_regs[i].name, - sec_count); - err_data->ce_count++; - } - if (ded_count) { - DRM_INFO( - "Instance[%d][%d]: SubBlock %s, DED %d\n", - se_id, instance_id, - gfx_ras_edc_regs[i].name, - ded_count); - err_data->ue_count++; - } + for (i = 0; i < ARRAY_SIZE(sec_ded_counter_registers); i++) { + for (j = 0; j < sec_ded_counter_registers[i].se_num; j++) { + for (k = 0; k < sec_ded_counter_registers[i].instance; k++) { + gfx_v9_0_select_se_sh(adev, j, 0, k); + reg_value = + RREG32(SOC15_REG_ENTRY_OFFSET(sec_ded_counter_registers[i])); + if (reg_value) + __get_ras_error_count(&sec_ded_counter_registers[i], + j, k, reg_value, + &sec_count, &ded_count); } } } - gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); - mutex_unlock(&adev->grbm_idx_mutex); - - return 0; -} -static int gfx_v9_0_cp_ecc_error_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - struct ras_common_if *ras_if = adev->gfx.ras_if; - struct ras_dispatch_if ih_data = { - .entry = entry, - }; + err_data->ce_count += sec_count; + err_data->ue_count += ded_count; - if (!ras_if) - return 0; + gfx_v9_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); + mutex_unlock(&adev->grbm_idx_mutex); - ih_data.head = *ras_if; + gfx_v9_0_query_utc_edc_status(adev, err_data); - DRM_ERROR("CP ECC ERROR IRQ\n"); - amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; } @@ -6343,7 +6380,7 @@ static const struct amdgpu_irq_src_funcs gfx_v9_0_priv_inst_irq_funcs = { static const struct amdgpu_irq_src_funcs gfx_v9_0_cp_ecc_error_irq_funcs = { .set = gfx_v9_0_set_cp_ecc_error_state, - .process = gfx_v9_0_cp_ecc_error_irq, + .process = amdgpu_gfx_cp_ecc_error_irq, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c index 6ce37ce77d14..e91bd7945777 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v1_0.c @@ -178,6 +178,8 @@ static void gfxhub_v1_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(GC, 0, mmVM_CONTEXT0_CNTL, tmp); } @@ -365,6 +367,8 @@ void gfxhub_v1_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(GC, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(GC, 0, mmVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c index db10640a3b2f..b70c7b483c24 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c @@ -46,21 +46,25 @@ u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev) return (u64)RREG32_SOC15(GC, 0, mmGCMC_VM_FB_OFFSET) << 24; } -static void gfxhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev) +void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) { - uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); + /* two registers distance between mmGCVM_CONTEXT0_* to mmGCVM_CONTEXT1_* */ + int offset = mmGCVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 + - mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; + 
WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + offset * vmid, lower_32_bits(page_table_base)); - WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - lower_32_bits(value)); - - WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - upper_32_bits(value)); + WREG32_SOC15_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + offset * vmid, upper_32_bits(page_table_base)); } static void gfxhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev) { - gfxhub_v2_0_init_gart_pt_regs(adev); + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + gfxhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base); WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, (u32)(adev->gmc.gart_start >> 12)); @@ -175,6 +179,8 @@ static void gfxhub_v2_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, GCVM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(GC, 0, mmGCVM_CONTEXT0_CNTL, tmp); } @@ -350,6 +356,8 @@ void gfxhub_v2_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(GC, 0, mmGCVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(GC, 0, mmGCVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h index 06807940748b..392b8cd94fc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.h @@ -31,5 +31,7 @@ void gfxhub_v2_0_set_fault_enable_default(struct amdgpu_device *adev, bool value); void gfxhub_v2_0_init(struct amdgpu_device *adev); u64 gfxhub_v2_0_get_mc_fb_offset(struct amdgpu_device *adev); +void gfxhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 5c7d5f73f54f..321f8a997be8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -235,6 +235,29 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, const unsigned eng = 17; unsigned int i; + spin_lock(&adev->gmc.invalidate_lock); + /* + * It may lose gpuvm invalidate acknowldege state across power-gating + * off cycle, add semaphore acquire before invalidation and semaphore + * release after invalidation to avoid entering power gated state + * to WA the Issue + */ + + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) { + for (i = 0; i < adev->usec_timeout; i++) { + /* a read return value of 1 means semaphore acuqire */ + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); + if (tmp & 0x1) + break; + udelay(1); + } + + if (i >= adev->usec_timeout) + DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); + } + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); /* @@ -254,6 +277,17 @@ static void gmc_v10_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, udelay(1); } + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. 
*/ + if (vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0); + + spin_unlock(&adev->gmc.invalidate_lock); + if (i < adev->usec_timeout) return; @@ -278,7 +312,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, int r; /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); mutex_lock(&adev->mman.gtt_window_lock); @@ -338,6 +372,20 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, uint32_t req = gmc_v10_0_get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; + /* + * It may lose gpuvm invalidate acknowldege state across power-gating + * off cycle, add semaphore acquire before invalidation and semaphore + * release after invalidation to avoid entering power gated state + * to WA the Issue + */ + + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || + ring->funcs->vmhub == AMDGPU_MMHUB_1) + /* a read return value of 1 means semaphore acuqire */ + amdgpu_ring_emit_reg_wait(ring, + hub->vm_inv_eng0_sem + eng, 0x1, 0x1); + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), lower_32_bits(pd_addr)); @@ -348,6 +396,15 @@ static uint64_t gmc_v10_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, hub->vm_inv_eng0_ack + eng, req, 1 << vmid); + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || + ring->funcs->vmhub == AMDGPU_MMHUB_1) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0); + return pd_addr; } @@ -396,43 +453,23 @@ static void gmc_v10_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid * 1 system * 0 valid */ -static uint64_t gmc_v10_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) -{ - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_EXECUTABLE) - pte_flag |= AMDGPU_PTE_EXECUTABLE; - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - switch (flags & AMDGPU_VM_MTYPE_MASK) { +static uint64_t gmc_v10_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) +{ + switch (flags) { case AMDGPU_VM_MTYPE_DEFAULT: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); case AMDGPU_VM_MTYPE_NC: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); case AMDGPU_VM_MTYPE_WC: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_WC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_WC); case AMDGPU_VM_MTYPE_CC: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_CC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_CC); case AMDGPU_VM_MTYPE_UC: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_UC); default: - pte_flag |= AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_NV10(MTYPE_NC); } - - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; } static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, @@ -459,12 +496,32 @@ static void gmc_v10_0_get_vm_pde(struct amdgpu_device *adev, int level, } } +static void gmc_v10_0_get_vm_pte(struct amdgpu_device *adev, + struct 
amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + + *flags &= ~AMDGPU_PTE_MTYPE_NV10_MASK; + *flags |= (mapping->flags & AMDGPU_PTE_MTYPE_NV10_MASK); + + if (mapping->flags & AMDGPU_PTE_PRT) { + *flags |= AMDGPU_PTE_PRT; + *flags |= AMDGPU_PTE_SNOOPED; + *flags |= AMDGPU_PTE_LOG; + *flags |= AMDGPU_PTE_SYSTEM; + *flags &= ~AMDGPU_PTE_VALID; + } +} + static const struct amdgpu_gmc_funcs gmc_v10_0_gmc_funcs = { .flush_gpu_tlb = gmc_v10_0_flush_gpu_tlb, .emit_flush_gpu_tlb = gmc_v10_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v10_0_emit_pasid_mapping, - .get_vm_pte_flags = gmc_v10_0_get_vm_pte_flags, - .get_vm_pde = gmc_v10_0_get_vm_pde + .map_mtype = gmc_v10_0_map_mtype, + .get_vm_pde = gmc_v10_0_get_vm_pde, + .get_vm_pte = gmc_v10_0_get_vm_pte }; static void gmc_v10_0_set_gmc_funcs(struct amdgpu_device *adev) @@ -518,8 +575,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, { u64 base = 0; - if (!amdgpu_sriov_vf(adev)) - base = gfxhub_v2_0_get_fb_location(adev); + base = gfxhub_v2_0_get_fb_location(adev); amdgpu_gmc_vram_location(adev, &adev->gmc, base); amdgpu_gmc_gart_location(adev, mc); @@ -539,24 +595,13 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev, */ static int gmc_v10_0_mc_init(struct amdgpu_device *adev) { - int chansize, numchan; - - if (!amdgpu_emu_mode) - adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); - else { - /* hard code vram_width for emulation */ - chansize = 128; - numchan = 1; - adev->gmc.vram_width = numchan * chansize; - } - /* Could aper size report 0 ? */ adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); /* size in MB on si */ adev->gmc.mc_vram_size = - adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL; + adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; adev->gmc.real_vram_size = adev->gmc.mc_vram_size; adev->gmc.visible_vram_size = adev->gmc.aper_size; @@ -635,7 +680,7 @@ static unsigned gmc_v10_0_get_vbios_fb_size(struct amdgpu_device *adev) static int gmc_v10_0_sw_init(void *handle) { - int r; + int r, vram_width = 0, vram_type = 0, vram_vendor = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfxhub_v2_0_init(adev); @@ -643,7 +688,15 @@ static int gmc_v10_0_sw_init(void *handle) spin_lock_init(&adev->gmc.invalidate_lock); - adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev); + r = amdgpu_atomfirmware_get_vram_info(adev, + &vram_width, &vram_type, &vram_vendor); + if (!amdgpu_emu_mode) + adev->gmc.vram_width = vram_width; + else + adev->gmc.vram_width = 1 * 128; /* numchan * chansize */ + + adev->gmc.vram_type = vram_type; + adev->gmc.vram_vendor = vram_vendor; switch (adev->asic_type) { case CHIP_NAVI10: case CHIP_NAVI14: @@ -793,7 +846,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) WREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL, tmp); /* Flush HDP after it is initialized */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); value = (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) ? 
false : true; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 9fb1765e92d1..b205039350b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -386,27 +386,20 @@ static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, return pd_addr; } -static uint64_t gmc_v6_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) -{ - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; -} - static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { BUG_ON(*addr & 0xFFFFFF0000000FFFULL); } +static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags &= ~AMDGPU_PTE_PRT; +} + static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev, bool value) { @@ -1153,7 +1146,7 @@ static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = { .emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb, .set_prt = gmc_v6_0_set_prt, .get_vm_pde = gmc_v6_0_get_vm_pde, - .get_vm_pte_flags = gmc_v6_0_get_vm_pte_flags + .get_vm_pte = gmc_v6_0_get_vm_pte, }; static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 0c3d9bc3a641..f08e5330642d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -463,27 +463,20 @@ static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid); } -static uint64_t gmc_v7_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) -{ - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; -} - static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { BUG_ON(*addr & 0xFFFFFF0000000FFFULL); } +static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags &= ~AMDGPU_PTE_PRT; +} + /** * gmc_v8_0_set_fault_enable_default - update VM fault handling * @@ -1343,8 +1336,8 @@ static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = { .emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping, .set_prt = gmc_v7_0_set_prt, - .get_vm_pte_flags = gmc_v7_0_get_vm_pte_flags, - .get_vm_pde = gmc_v7_0_get_vm_pde + .get_vm_pde = gmc_v7_0_get_vm_pde, + .get_vm_pte = gmc_v7_0_get_vm_pte }; static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index ea764dd9245d..6d96d40fbcb8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -686,29 +686,21 @@ static void gmc_v8_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, * 0 valid */ -static uint64_t gmc_v8_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) -{ - uint64_t pte_flag = 0; - - if (flags & 
AMDGPU_VM_PAGE_EXECUTABLE) - pte_flag |= AMDGPU_PTE_EXECUTABLE; - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; -} - static void gmc_v8_0_get_vm_pde(struct amdgpu_device *adev, int level, uint64_t *addr, uint64_t *flags) { BUG_ON(*addr & 0xFFFFFF0000000FFFULL); } +static void gmc_v8_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + *flags &= ~AMDGPU_PTE_PRT; +} + /** * gmc_v8_0_set_fault_enable_default - update VM fault handling * @@ -1711,8 +1703,8 @@ static const struct amdgpu_gmc_funcs gmc_v8_0_gmc_funcs = { .emit_flush_gpu_tlb = gmc_v8_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v8_0_emit_pasid_mapping, .set_prt = gmc_v8_0_set_prt, - .get_vm_pte_flags = gmc_v8_0_get_vm_pte_flags, - .get_vm_pde = gmc_v8_0_get_vm_pde + .get_vm_pde = gmc_v8_0_get_vm_pde, + .get_vm_pte = gmc_v8_0_get_vm_pte }; static const struct amdgpu_irq_src_funcs gmc_v8_0_irq_funcs = { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index f91337030dc0..3c355fb5d2b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -51,10 +51,12 @@ #include "gfxhub_v1_1.h" #include "mmhub_v9_4.h" #include "umc_v6_1.h" +#include "umc_v6_0.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" #include "amdgpu_ras.h" +#include "amdgpu_xgmi.h" /* add these here since we already include dce12 headers and these are for DCN */ #define mmHUBP0_DCSURF_PRI_VIEWPORT_DIMENSION 0x055d @@ -243,44 +245,6 @@ static int gmc_v9_0_ecc_interrupt_state(struct amdgpu_device *adev, return 0; } -static int gmc_v9_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, - struct amdgpu_iv_entry *entry) -{ - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (adev->umc.funcs->query_ras_error_count) - adev->umc.funcs->query_ras_error_count(adev, err_data); - /* umc query_ras_error_address is also responsible for clearing - * error status - */ - if (adev->umc.funcs->query_ras_error_address) - adev->umc.funcs->query_ras_error_address(adev, err_data); - - /* only uncorrectable error needs gpu reset */ - if (err_data->ue_count) - amdgpu_ras_reset_gpu(adev, 0); - - return AMDGPU_RAS_SUCCESS; -} - -static int gmc_v9_0_process_ecc_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - struct ras_common_if *ras_if = adev->gmc.umc_ras_if; - struct ras_dispatch_if ih_data = { - .entry = entry, - }; - - if (!ras_if) - return 0; - - ih_data.head = *ras_if; - - amdgpu_ras_interrupt_dispatch(adev, &ih_data); - return 0; -} - static int gmc_v9_0_vm_fault_interrupt_state(struct amdgpu_device *adev, struct amdgpu_irq_src *src, unsigned type, @@ -355,6 +319,10 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev, } /* If it's the first fault for this address, process it normally */ + if (retry_fault && !in_interrupt() && + amdgpu_vm_handle_fault(adev, entry->pasid, addr)) + return 1; /* This also prevents sending it to KFD */ + if (!amdgpu_sriov_vf(adev)) { /* * Issue a dummy read to wait for the status register to @@ -417,7 +385,7 @@ static const struct amdgpu_irq_src_funcs gmc_v9_0_irq_funcs = { static const struct amdgpu_irq_src_funcs gmc_v9_0_ecc_funcs = { .set = 
gmc_v9_0_ecc_interrupt_state, - .process = gmc_v9_0_process_ecc_irq, + .process = amdgpu_umc_process_ecc_irq, }; static void gmc_v9_0_set_irq_funcs(struct amdgpu_device *adev) @@ -491,6 +459,29 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, } spin_lock(&adev->gmc.invalidate_lock); + + /* + * It may lose gpuvm invalidate acknowldege state across power-gating + * off cycle, add semaphore acquire before invalidation and semaphore + * release after invalidation to avoid entering power gated state + * to WA the Issue + */ + + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) { + for (j = 0; j < adev->usec_timeout; j++) { + /* a read return value of 1 means semaphore acuqire */ + tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng); + if (tmp & 0x1) + break; + udelay(1); + } + + if (j >= adev->usec_timeout) + DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); + } + WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp); /* @@ -506,7 +497,18 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, break; udelay(1); } + + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (vmhub == AMDGPU_MMHUB_0 || + vmhub == AMDGPU_MMHUB_1) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + WREG32_NO_KIQ(hub->vm_inv_eng0_sem + eng, 0); + spin_unlock(&adev->gmc.invalidate_lock); + if (j < adev->usec_timeout) return; @@ -521,6 +523,20 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, uint32_t req = gmc_v9_0_get_invalidate_req(vmid, 0); unsigned eng = ring->vm_inv_eng; + /* + * It may lose gpuvm invalidate acknowldege state across power-gating + * off cycle, add semaphore acquire before invalidation and semaphore + * release after invalidation to avoid entering power gated state + * to WA the Issue + */ + + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */ + if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || + ring->funcs->vmhub == AMDGPU_MMHUB_1) + /* a read return value of 1 means semaphore acuqire */ + amdgpu_ring_emit_reg_wait(ring, + hub->vm_inv_eng0_sem + eng, 0x1, 0x1); + amdgpu_ring_emit_wreg(ring, hub->ctx0_ptb_addr_lo32 + (2 * vmid), lower_32_bits(pd_addr)); @@ -531,6 +547,15 @@ static uint64_t gmc_v9_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring, hub->vm_inv_eng0_ack + eng, req, 1 << vmid); + /* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. 
*/ + if (ring->funcs->vmhub == AMDGPU_MMHUB_0 || + ring->funcs->vmhub == AMDGPU_MMHUB_1) + /* + * add semaphore release after invalidation, + * write with 0 means semaphore release + */ + amdgpu_ring_emit_wreg(ring, hub->vm_inv_eng0_sem + eng, 0); + return pd_addr; } @@ -584,44 +609,25 @@ static void gmc_v9_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned vmid, * 0 valid */ -static uint64_t gmc_v9_0_get_vm_pte_flags(struct amdgpu_device *adev, - uint32_t flags) +static uint64_t gmc_v9_0_map_mtype(struct amdgpu_device *adev, uint32_t flags) { - uint64_t pte_flag = 0; - - if (flags & AMDGPU_VM_PAGE_EXECUTABLE) - pte_flag |= AMDGPU_PTE_EXECUTABLE; - if (flags & AMDGPU_VM_PAGE_READABLE) - pte_flag |= AMDGPU_PTE_READABLE; - if (flags & AMDGPU_VM_PAGE_WRITEABLE) - pte_flag |= AMDGPU_PTE_WRITEABLE; - - switch (flags & AMDGPU_VM_MTYPE_MASK) { + switch (flags) { case AMDGPU_VM_MTYPE_DEFAULT: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); case AMDGPU_VM_MTYPE_NC: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); case AMDGPU_VM_MTYPE_WC: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_WC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_WC); + case AMDGPU_VM_MTYPE_RW: + return AMDGPU_PTE_MTYPE_VG10(MTYPE_RW); case AMDGPU_VM_MTYPE_CC: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_CC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_CC); case AMDGPU_VM_MTYPE_UC: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_UC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_UC); default: - pte_flag |= AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); - break; + return AMDGPU_PTE_MTYPE_VG10(MTYPE_NC); } - - if (flags & AMDGPU_VM_PAGE_PRT) - pte_flag |= AMDGPU_PTE_PRT; - - return pte_flag; } static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, @@ -648,12 +654,34 @@ static void gmc_v9_0_get_vm_pde(struct amdgpu_device *adev, int level, } } +static void gmc_v9_0_get_vm_pte(struct amdgpu_device *adev, + struct amdgpu_bo_va_mapping *mapping, + uint64_t *flags) +{ + *flags &= ~AMDGPU_PTE_EXECUTABLE; + *flags |= mapping->flags & AMDGPU_PTE_EXECUTABLE; + + *flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; + *flags |= mapping->flags & AMDGPU_PTE_MTYPE_VG10_MASK; + + if (mapping->flags & AMDGPU_PTE_PRT) { + *flags |= AMDGPU_PTE_PRT; + *flags &= ~AMDGPU_PTE_VALID; + } + + if (adev->asic_type == CHIP_ARCTURUS && + !(*flags & AMDGPU_PTE_SYSTEM) && + mapping->bo_va->is_xgmi) + *flags |= AMDGPU_PTE_SNOOPED; +} + static const struct amdgpu_gmc_funcs gmc_v9_0_gmc_funcs = { .flush_gpu_tlb = gmc_v9_0_flush_gpu_tlb, .emit_flush_gpu_tlb = gmc_v9_0_emit_flush_gpu_tlb, .emit_pasid_mapping = gmc_v9_0_emit_pasid_mapping, - .get_vm_pte_flags = gmc_v9_0_get_vm_pte_flags, - .get_vm_pde = gmc_v9_0_get_vm_pde + .map_mtype = gmc_v9_0_map_mtype, + .get_vm_pde = gmc_v9_0_get_vm_pde, + .get_vm_pte = gmc_v9_0_get_vm_pte }; static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) @@ -664,6 +692,9 @@ static void gmc_v9_0_set_gmc_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { + case CHIP_VEGA10: + adev->umc.funcs = &umc_v6_0_funcs; + break; case CHIP_VEGA20: adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; adev->umc.channel_inst_num = UMC_V6_1_CHANNEL_INSTANCE_NUM; @@ -681,7 +712,7 @@ static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_VEGA20: - adev->mmhub_funcs = &mmhub_v1_0_funcs; + adev->mmhub.funcs = &mmhub_v1_0_funcs; 
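The invalidation paths touched above bracket the TLB flush request with a semaphore: the driver polls (or, on the ring path, emits a register wait on) the ENG0_SEM register until it reads back 1, issues the invalidate request, waits for the per-VMID ack bit, then writes 0 to release the semaphore so the hub cannot power-gate away the acknowledge state mid-flush. The following standalone sketch models that acquire/request/release ordering; the MMIO helpers, register numbers, and timeout value are stubs, not the driver's RREG32_NO_KIQ/WREG32_NO_KIQ paths.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t fake_regs[8];
static uint32_t rreg32(uint32_t reg) { return fake_regs[reg]; }
static void wreg32(uint32_t reg, uint32_t val) { fake_regs[reg] = val; }
static void udelay_us(unsigned int us) { (void)us; }

#define USEC_TIMEOUT 1000

static bool flush_vm_hub(uint32_t sem_reg, uint32_t req_reg,
			 uint32_t ack_reg, uint32_t req, uint32_t vmid)
{
	unsigned int i;

	/* Acquire: a read that returns 1 means the semaphore is ours. */
	for (i = 0; i < USEC_TIMEOUT; i++) {
		if (rreg32(sem_reg) & 0x1)
			break;
		udelay_us(1);
	}
	if (i >= USEC_TIMEOUT)
		return false;	/* semaphore acquire timed out */

	wreg32(req_reg, req);	/* kick the invalidation */

	/* Wait for the per-VMID ack bit. */
	for (i = 0; i < USEC_TIMEOUT; i++) {
		if (rreg32(ack_reg) & (1u << vmid))
			break;
		udelay_us(1);
	}

	wreg32(sem_reg, 0);	/* Release: writing 0 drops the semaphore. */
	return i < USEC_TIMEOUT;
}

int main(void)
{
	enum { SEM = 0, REQ = 1, ACK = 2 };

	fake_regs[SEM] = 0x1;		/* semaphore already free */
	fake_regs[ACK] = 1u << 3;	/* ack for VMID 3 already set */

	printf("flush %s\n", flush_vm_hub(SEM, REQ, ACK, 0xdead, 3) ? "ok" : "timed out");
	return 0;
}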
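The PTE-flag rework above splits the old get_vm_pte_flags() callback into two hooks: map_mtype() translates a single AMDGPU_VM_MTYPE_* request straight into the hardware MTYPE PTE bits, and get_vm_pte() merges per-mapping flags (executable, MTYPE, PRT) into the PTE flags the VM code has already assembled. Below is a rough standalone model of that split; the flag encodings and MTYPE numbering are invented for the example and do not match the real AMDGPU_PTE_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Invented flag encodings, for illustration only. */
#define PTE_VALID       (1ull << 0)
#define PTE_EXECUTABLE  (1ull << 4)
#define PTE_PRT         (1ull << 51)
#define PTE_MTYPE_SHIFT 57
#define PTE_MTYPE_MASK  (3ull << PTE_MTYPE_SHIFT)
#define PTE_MTYPE(m)    ((uint64_t)(m) << PTE_MTYPE_SHIFT)

enum vm_mtype { VM_MTYPE_DEFAULT, VM_MTYPE_NC, VM_MTYPE_WC, VM_MTYPE_UC };

struct mapping { uint64_t flags; };

/* map_mtype(): one MTYPE request in, the matching PTE MTYPE bits out. */
static uint64_t map_mtype(enum vm_mtype mtype)
{
	switch (mtype) {
	case VM_MTYPE_WC: return PTE_MTYPE(1);
	case VM_MTYPE_UC: return PTE_MTYPE(2);
	case VM_MTYPE_NC:
	case VM_MTYPE_DEFAULT:
	default:          return PTE_MTYPE(0);
	}
}

/* get_vm_pte(): overlay the mapping's executable/MTYPE/PRT choices onto the
 * PTE flags computed so far, clearing VALID for PRT (fake) mappings. */
static void get_vm_pte(const struct mapping *m, uint64_t *flags)
{
	*flags &= ~PTE_EXECUTABLE;
	*flags |= m->flags & PTE_EXECUTABLE;

	*flags &= ~PTE_MTYPE_MASK;
	*flags |= m->flags & PTE_MTYPE_MASK;

	if (m->flags & PTE_PRT) {
		*flags |= PTE_PRT;
		*flags &= ~PTE_VALID;
	}
}

int main(void)
{
	struct mapping m = { .flags = PTE_EXECUTABLE | map_mtype(VM_MTYPE_UC) };
	uint64_t pte = PTE_VALID;

	get_vm_pte(&m, &pte);
	printf("pte flags: 0x%016llx\n", (unsigned long long)pte);
	return 0;
}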
break; default: break; @@ -762,140 +793,10 @@ static int gmc_v9_0_allocate_vm_inv_eng(struct amdgpu_device *adev) return 0; } -static int gmc_v9_0_ecc_ras_block_late_init(void *handle, - struct ras_fs_if *fs_info, struct ras_common_if *ras_block) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ras_common_if **ras_if = NULL; - struct ras_ih_if ih_info = { - .cb = gmc_v9_0_process_ras_data_cb, - }; - int r; - - if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) - ras_if = &adev->gmc.umc_ras_if; - else if (ras_block->block == AMDGPU_RAS_BLOCK__MMHUB) - ras_if = &adev->gmc.mmhub_ras_if; - else - BUG(); - - if (!amdgpu_ras_is_supported(adev, ras_block->block)) { - amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); - return 0; - } - - /* handle resume path. */ - if (*ras_if) { - /* resend ras TA enable cmd during resume. - * prepare to handle failure. - */ - ih_info.head = **ras_if; - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - /* request a gpu reset. will run again. */ - amdgpu_ras_request_reset_on_boot(adev, - ras_block->block); - return 0; - } - /* fail to enable ras, cleanup all. */ - goto irq; - } - /* enable successfully. continue. */ - goto resume; - } - - *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL); - if (!*ras_if) - return -ENOMEM; - - **ras_if = *ras_block; - - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - amdgpu_ras_request_reset_on_boot(adev, - ras_block->block); - r = 0; - } - goto feature; - } - - ih_info.head = **ras_if; - fs_info->head = **ras_if; - - if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) { - r = amdgpu_ras_interrupt_add_handler(adev, &ih_info); - if (r) - goto interrupt; - } - - amdgpu_ras_debugfs_create(adev, fs_info); - - r = amdgpu_ras_sysfs_create(adev, fs_info); - if (r) - goto sysfs; -resume: - if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) { - r = amdgpu_irq_get(adev, &adev->gmc.ecc_irq, 0); - if (r) - goto irq; - } - - return 0; -irq: - amdgpu_ras_sysfs_remove(adev, *ras_if); -sysfs: - amdgpu_ras_debugfs_remove(adev, *ras_if); - if (ras_block->block == AMDGPU_RAS_BLOCK__UMC) - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); -interrupt: - amdgpu_ras_feature_enable(adev, *ras_if, 0); -feature: - kfree(*ras_if); - *ras_if = NULL; - return r; -} - -static int gmc_v9_0_ecc_late_init(void *handle) -{ - int r; - - struct ras_fs_if umc_fs_info = { - .sysfs_name = "umc_err_count", - .debugfs_name = "umc_err_inject", - }; - struct ras_common_if umc_ras_block = { - .block = AMDGPU_RAS_BLOCK__UMC, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .sub_block_index = 0, - .name = "umc", - }; - struct ras_fs_if mmhub_fs_info = { - .sysfs_name = "mmhub_err_count", - .debugfs_name = "mmhub_err_inject", - }; - struct ras_common_if mmhub_ras_block = { - .block = AMDGPU_RAS_BLOCK__MMHUB, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .sub_block_index = 0, - .name = "mmhub", - }; - - r = gmc_v9_0_ecc_ras_block_late_init(handle, - &umc_fs_info, &umc_ras_block); - if (r) - return r; - - r = gmc_v9_0_ecc_ras_block_late_init(handle, - &mmhub_fs_info, &mmhub_ras_block); - return r; -} - static int gmc_v9_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - bool r; + int r; if (!gmc_v9_0_keep_stolen_memory(adev)) amdgpu_bo_late_init(adev); @@ -929,7 +830,7 @@ static int gmc_v9_0_late_init(void *handle) } } - r = gmc_v9_0_ecc_late_init(handle); + r = amdgpu_gmc_ras_late_init(adev); if (r) return r; @@ -970,33 +871,11 @@ 
static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev, */ static int gmc_v9_0_mc_init(struct amdgpu_device *adev) { - int chansize, numchan; int r; - if (amdgpu_sriov_vf(adev)) { - /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN, - * and DF related registers is not readable, seems hardcord is the - * only way to set the correct vram_width - */ - adev->gmc.vram_width = 2048; - } else if (amdgpu_emu_mode != 1) { - adev->gmc.vram_width = amdgpu_atomfirmware_get_vram_width(adev); - } - - if (!adev->gmc.vram_width) { - /* hbm memory channel size */ - if (adev->flags & AMD_IS_APU) - chansize = 64; - else - chansize = 128; - - numchan = adev->df_funcs->get_hbm_channel_number(adev); - adev->gmc.vram_width = numchan * chansize; - } - /* size in MB on si */ adev->gmc.mc_vram_size = - adev->nbio_funcs->get_memsize(adev) * 1024ULL * 1024ULL; + adev->nbio.funcs->get_memsize(adev) * 1024ULL * 1024ULL; adev->gmc.real_vram_size = adev->gmc.mc_vram_size; if (!(adev->flags & AMD_IS_APU)) { @@ -1108,7 +987,7 @@ static unsigned gmc_v9_0_get_vbios_fb_size(struct amdgpu_device *adev) static int gmc_v9_0_sw_init(void *handle) { - int r; + int r, vram_width = 0, vram_type = 0, vram_vendor = 0; struct amdgpu_device *adev = (struct amdgpu_device *)handle; gfxhub_v1_0_init(adev); @@ -1119,7 +998,32 @@ static int gmc_v9_0_sw_init(void *handle) spin_lock_init(&adev->gmc.invalidate_lock); - adev->gmc.vram_type = amdgpu_atomfirmware_get_vram_type(adev); + r = amdgpu_atomfirmware_get_vram_info(adev, + &vram_width, &vram_type, &vram_vendor); + if (amdgpu_sriov_vf(adev)) + /* For Vega10 SR-IOV, vram_width can't be read from ATOM as RAVEN, + * and DF related registers is not readable, seems hardcord is the + * only way to set the correct vram_width + */ + adev->gmc.vram_width = 2048; + else if (amdgpu_emu_mode != 1) + adev->gmc.vram_width = vram_width; + + if (!adev->gmc.vram_width) { + int chansize, numchan; + + /* hbm memory channel size */ + if (adev->flags & AMD_IS_APU) + chansize = 64; + else + chansize = 128; + + numchan = adev->df_funcs->get_hbm_channel_number(adev); + adev->gmc.vram_width = numchan * chansize; + } + + adev->gmc.vram_type = vram_type; + adev->gmc.vram_vendor = vram_vendor; switch (adev->asic_type) { case CHIP_RAVEN: adev->num_vmhubs = 2; @@ -1240,33 +1144,7 @@ static int gmc_v9_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; void *stolen_vga_buf; - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC) && - adev->gmc.umc_ras_if) { - struct ras_common_if *ras_if = adev->gmc.umc_ras_if; - struct ras_ih_if ih_info = { - .head = *ras_if, - }; - - /* remove fs first */ - amdgpu_ras_debugfs_remove(adev, ras_if); - amdgpu_ras_sysfs_remove(adev, ras_if); - /* remove the IH */ - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); - amdgpu_ras_feature_enable(adev, ras_if, 0); - kfree(ras_if); - } - - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB) && - adev->gmc.mmhub_ras_if) { - struct ras_common_if *ras_if = adev->gmc.mmhub_ras_if; - - /* remove fs and disable ras feature */ - amdgpu_ras_debugfs_remove(adev, ras_if); - amdgpu_ras_sysfs_remove(adev, ras_if); - amdgpu_ras_feature_enable(adev, ras_if, 0); - kfree(ras_if); - } - + amdgpu_gmc_ras_fini(adev); amdgpu_gem_force_release(adev); amdgpu_vm_manager_fini(adev); @@ -1316,13 +1194,7 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev) */ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) { - int r, i; - bool value; - u32 tmp; - - 
amdgpu_device_program_register_sequence(adev, - golden_settings_vega10_hdp, - ARRAY_SIZE(golden_settings_vega10_hdp)); + int r; if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); @@ -1332,15 +1204,6 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) if (r) return r; - switch (adev->asic_type) { - case CHIP_RAVEN: - /* TODO for renoir */ - mmhub_v1_0_update_power_gating(adev, true); - break; - default: - break; - } - r = gfxhub_v1_0_gart_enable(adev); if (r) return r; @@ -1352,6 +1215,49 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) if (r) return r; + DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", + (unsigned)(adev->gmc.gart_size >> 20), + (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); + adev->gart.ready = true; + return 0; +} + +static int gmc_v9_0_hw_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool value; + int r, i; + u32 tmp; + + /* The sequence of these two function calls matters.*/ + gmc_v9_0_init_golden_registers(adev); + + if (adev->mode_info.num_crtc) { + if (adev->asic_type != CHIP_ARCTURUS) { + /* Lockout access through VGA aperture*/ + WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); + + /* disable VGA render */ + WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); + } + } + + amdgpu_device_program_register_sequence(adev, + golden_settings_vega10_hdp, + ARRAY_SIZE(golden_settings_vega10_hdp)); + + switch (adev->asic_type) { + case CHIP_RAVEN: + /* TODO for renoir */ + mmhub_v1_0_update_power_gating(adev, true); + break; + case CHIP_ARCTURUS: + WREG32_FIELD15(HDP, 0, HDP_MMHUB_CNTL, HDP_MMHUB_GCC, 1); + break; + default: + break; + } + WREG32_FIELD15(HDP, 0, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 1); tmp = RREG32_SOC15(HDP, 0, mmHDP_HOST_PATH_CNTL); @@ -1361,7 +1267,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); /* After HDP is initialized, flush HDP.*/ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS) value = false; @@ -1377,28 +1283,8 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) for (i = 0; i < adev->num_vmhubs; ++i) gmc_v9_0_flush_gpu_tlb(adev, 0, i, 0); - DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", - (unsigned)(adev->gmc.gart_size >> 20), - (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); - adev->gart.ready = true; - return 0; -} - -static int gmc_v9_0_hw_init(void *handle) -{ - int r; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - /* The sequence of these two function calls matters.*/ - gmc_v9_0_init_golden_registers(adev); - - if (adev->mode_info.num_crtc) { - /* Lockout access through VGA aperture*/ - WREG32_FIELD15(DCE, 0, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1); - - /* disable VGA render */ - WREG32_FIELD15(DCE, 0, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); - } + if (adev->umc.funcs && adev->umc.funcs->init_registers) + adev->umc.funcs->init_registers(adev); r = gmc_v9_0_gart_enable(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 04cd4b6f95d4..28105e4af507 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -206,6 +206,8 @@ static void mmhub_v1_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL); tmp = 
REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(MMHUB, 0, mmVM_CONTEXT0_CNTL, tmp); } @@ -418,6 +420,8 @@ void mmhub_v1_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, 0, mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(MMHUB, 0, mmVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = @@ -616,5 +620,6 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev, } const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = { + .ras_late_init = amdgpu_mmhub_ras_late_init, .query_ras_error_count = mmhub_v1_0_query_ras_error_count, }; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index b39bea6f54e9..a7cb185d639a 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -31,20 +31,25 @@ #include "soc15_common.h" -static void mmhub_v2_0_init_gart_pt_regs(struct amdgpu_device *adev) +void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base) { - uint64_t value = amdgpu_gmc_pd_addr(adev->gart.bo); + /* two registers distance between mmMMVM_CONTEXT0_* to mmMMVM_CONTEXT1_* */ + int offset = mmMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 + - mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32; - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, - lower_32_bits(value)); + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + offset * vmid, lower_32_bits(page_table_base)); - WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, - upper_32_bits(value)); + WREG32_SOC15_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + offset * vmid, upper_32_bits(page_table_base)); } static void mmhub_v2_0_init_gart_aperture_regs(struct amdgpu_device *adev) { - mmhub_v2_0_init_gart_pt_regs(adev); + uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base); WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32, (u32)(adev->gmc.gart_start >> 12)); @@ -161,6 +166,8 @@ static void mmhub_v2_0_enable_system_domain(struct amdgpu_device *adev) tmp = RREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15(MMHUB, 0, mmMMVM_CONTEXT0_CNTL, tmp); } @@ -341,6 +348,8 @@ void mmhub_v2_0_init(struct amdgpu_device *adev) hub->ctx0_ptb_addr_hi32 = SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32); + hub->vm_inv_eng0_sem = + SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_SEM); hub->vm_inv_eng0_req = SOC15_REG_OFFSET(MMHUB, 0, mmMMVM_INVALIDATE_ENG0_REQ); hub->vm_inv_eng0_ack = diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h index db16f3ece218..3ea4344f0315 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.h @@ -31,5 +31,7 @@ void mmhub_v2_0_init(struct amdgpu_device *adev); int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, enum amd_clockgating_state state); void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u32 *flags); 
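/*
 * Editor's note: hypothetical usage sketch for the newly exported helper
 * declared just below.  The mmhub_v2_0_init_gart_aperture_regs() hunk above
 * does the equivalent: the GART page directory is programmed into VM
 * context 0, and the helper derives the per-VMID register offset from the
 * stride between the CONTEXT1 and CONTEXT0 base-address registers.  The
 * wrapper name here is illustrative only.
 */
static void example_program_gart_pd(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	/* VMID 0 is the GART/system context on this hub */
	mmhub_v2_0_setup_vm_pt_regs(adev, 0, pt_base);
}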
+void mmhub_v2_0_setup_vm_pt_regs(struct amdgpu_device *adev, uint32_t vmid, + uint64_t page_table_base); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index 9ed178fa241c..66efe2f7bd76 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -249,6 +249,8 @@ static void mmhub_v9_4_enable_system_domain(struct amdgpu_device *adev, hubid * MMHUB_INSTANCE_REGISTER_OFFSET); tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1); tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); + tmp = REG_SET_FIELD(tmp, VML2VC0_VM_CONTEXT0_CNTL, + RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0); WREG32_SOC15_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_CNTL, hubid * MMHUB_INSTANCE_REGISTER_OFFSET, tmp); } @@ -502,6 +504,10 @@ void mmhub_v9_4_init(struct amdgpu_device *adev) SOC15_REG_OFFSET(MMHUB, 0, mmVML2VC0_VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32) + i * MMHUB_INSTANCE_REGISTER_OFFSET; + hub[i]->vm_inv_eng0_sem = + SOC15_REG_OFFSET(MMHUB, 0, + mmVML2VC0_VM_INVALIDATE_ENG0_SEM) + + i * MMHUB_INSTANCE_REGISTER_OFFSET; hub[i]->vm_inv_eng0_req = SOC15_REG_OFFSET(MMHUB, 0, mmVML2VC0_VM_INVALIDATE_ENG0_REQ) + diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c new file mode 100644 index 000000000000..0d8767eb7a70 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -0,0 +1,380 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "nbio/nbio_2_3_offset.h" +#include "nbio/nbio_2_3_sh_mask.h" +#include "gc/gc_10_1_0_offset.h" +#include "gc/gc_10_1_0_sh_mask.h" +#include "soc15.h" +#include "navi10_ih.h" +#include "soc15_common.h" +#include "mxgpu_nv.h" +#include "mxgpu_ai.h" + +static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev) +{ + WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2); +} + +static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val) +{ + WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0); +} + +/* + * this peek_msg could *only* be called in IRQ routine becuase in IRQ routine + * RCV_MSG_VALID filed of BIF_BX_PF_MAILBOX_CONTROL must already be set to 1 + * by host. + * + * if called no in IRQ routine, this peek_msg cannot guaranteed to return the + * correct value since it doesn't return the RCV_DW0 under the case that + * RCV_MSG_VALID is set by host. 
+ */ +static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev) +{ + return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0)); +} + + +static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev, + enum idh_event event) +{ + u32 reg; + + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW0)); + if (reg != event) + return -ENOENT; + + xgpu_nv_mailbox_send_ack(adev); + + return 0; +} + +static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev) +{ + return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2; +} + +static int xgpu_nv_poll_ack(struct amdgpu_device *adev) +{ + int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT; + u8 reg; + + do { + reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE); + if (reg & 2) + return 0; + + mdelay(5); + timeout -= 5; + } while (timeout > 1); + + pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT); + + return -ETIME; +} + +static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event) +{ + int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT; + + do { + r = xgpu_nv_mailbox_rcv_msg(adev, event); + if (!r) + return 0; + + msleep(10); + timeout -= 10; + } while (timeout > 1); + + pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r); + + return -ETIME; +} + +static void xgpu_nv_mailbox_trans_msg (struct amdgpu_device *adev, + enum idh_request req, u32 data1, u32 data2, u32 data3) +{ + u32 reg; + int r; + uint8_t trn; + + /* IMPORTANT: + * clear TRN_MSG_VALID valid to clear host's RCV_MSG_ACK + * and with host's RCV_MSG_ACK cleared hw automatically clear host's RCV_MSG_ACK + * which lead to VF's TRN_MSG_ACK cleared, otherwise below xgpu_nv_poll_ack() + * will return immediatly + */ + do { + xgpu_nv_mailbox_set_valid(adev, false); + trn = xgpu_nv_peek_ack(adev); + if (trn) { + pr_err("trn=%x ACK should not assert! 
wait again !\n", trn); + msleep(1); + } + } while (trn); + + reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0)); + reg = REG_SET_FIELD(reg, BIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0, + MSGBUF_DATA, req); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW0), + reg); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW1), + data1); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW2), + data2); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_MSGBUF_TRN_DW3), + data3); + + xgpu_nv_mailbox_set_valid(adev, true); + + /* start to poll ack */ + r = xgpu_nv_poll_ack(adev); + if (r) + pr_err("Doesn't get ack from pf, continue\n"); + + xgpu_nv_mailbox_set_valid(adev, false); +} + +static int xgpu_nv_send_access_requests(struct amdgpu_device *adev, + enum idh_request req) +{ + int r; + + xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0); + + /* start to check msg if request is idh_req_gpu_init_access */ + if (req == IDH_REQ_GPU_INIT_ACCESS || + req == IDH_REQ_GPU_FINI_ACCESS || + req == IDH_REQ_GPU_RESET_ACCESS) { + r = xgpu_nv_poll_msg(adev, IDH_READY_TO_ACCESS_GPU); + if (r) { + pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n"); + return r; + } + /* Retrieve checksum from mailbox2 */ + if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) { + adev->virt.fw_reserve.checksum_key = + RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, + mmBIF_BX_PF_MAILBOX_MSGBUF_RCV_DW2)); + } + } + + return 0; +} + +static int xgpu_nv_request_reset(struct amdgpu_device *adev) +{ + return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS); +} + +static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev, + bool init) +{ + enum idh_request req; + + req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS; + return xgpu_nv_send_access_requests(adev, req); +} + +static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev, + bool init) +{ + enum idh_request req; + int r = 0; + + req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS; + r = xgpu_nv_send_access_requests(adev, req); + + return r; +} + +static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + DRM_DEBUG("get ack intr and do nothing.\n"); + return 0; +} + +static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL)); + + tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, ACK_INT_EN, + (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp); + + return 0; +} + +static void xgpu_nv_mailbox_flr_work(struct work_struct *work) +{ + struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work); + struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt); + int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT; + int locked; + + /* block amdgpu_gpu_recover till msg FLR COMPLETE received, + * otherwise the mailbox msg will be ruined/reseted by + * the VF FLR. + * + * we can unlock the lock_reset to allow "amdgpu_job_timedout" + * to run gpu_recover() after FLR_NOTIFICATION_CMPL received + * which means host side had finished this VF's FLR. 
+ */ + locked = mutex_trylock(&adev->lock_reset); + if (locked) + adev->in_gpu_reset = 1; + + do { + if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL) + goto flr_done; + + msleep(10); + timeout -= 10; + } while (timeout > 1); + +flr_done: + if (locked) { + adev->in_gpu_reset = 0; + mutex_unlock(&adev->lock_reset); + } + + /* Trigger recovery for world switch failure if no TDR */ + if (amdgpu_device_should_recover_gpu(adev)) + amdgpu_device_gpu_recover(adev, NULL); +} + +static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL)); + + tmp = REG_SET_FIELD(tmp, BIF_BX_PF_MAILBOX_INT_CNTL, VALID_INT_EN, + (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0); + WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_INT_CNTL), tmp); + + return 0; +} + +static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + enum idh_event event = xgpu_nv_mailbox_peek_msg(adev); + + switch (event) { + case IDH_FLR_NOTIFICATION: + if (amdgpu_sriov_runtime(adev)) + schedule_work(&adev->virt.flr_work); + break; + /* READY_TO_ACCESS_GPU is fetched by kernel polling, IRQ can ignore + * it byfar since that polling thread will handle it, + * other msg like flr complete is not handled here. + */ + case IDH_CLR_MSG_BUF: + case IDH_FLR_NOTIFICATION_CMPL: + case IDH_READY_TO_ACCESS_GPU: + default: + break; + } + + return 0; +} + +static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = { + .set = xgpu_nv_set_mailbox_ack_irq, + .process = xgpu_nv_mailbox_ack_irq, +}; + +static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = { + .set = xgpu_nv_set_mailbox_rcv_irq, + .process = xgpu_nv_mailbox_rcv_irq, +}; + +void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->virt.ack_irq.num_types = 1; + adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs; + adev->virt.rcv_irq.num_types = 1; + adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs; +} + +int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq); + if (r) + return r; + + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq); + if (r) { + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); + return r; + } + + return 0; +} + +int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev) +{ + int r; + + r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0); + if (r) + return r; + r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0); + if (r) { + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); + return r; + } + + INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work); + + return 0; +} + +void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev) +{ + amdgpu_irq_put(adev, &adev->virt.ack_irq, 0); + amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0); +} + +const struct amdgpu_virt_ops xgpu_nv_virt_ops = { + .req_full_gpu = xgpu_nv_request_full_gpu_access, + .rel_full_gpu = xgpu_nv_release_full_gpu_access, + .reset_gpu = xgpu_nv_request_reset, + .wait_reset = NULL, + .trans_msg = xgpu_nv_mailbox_trans_msg, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h new file mode 100644 index 000000000000..99b15f6865cb --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.h @@ -0,0 +1,41 @@ +/* + * Copyright 2014 Advanced Micro 
Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __MXGPU_NV_H__ +#define __MXGPU_NV_H__ + +#define NV_MAILBOX_POLL_ACK_TIMEDOUT 500 +#define NV_MAILBOX_POLL_MSG_TIMEDOUT 12000 +#define NV_MAILBOX_POLL_FLR_TIMEDOUT 500 + +extern const struct amdgpu_virt_ops xgpu_nv_virt_ops; + +void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev); +int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev); +int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev); +void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev); + +#define NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4) +#define NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE (SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF_MAILBOX_CONTROL) * 4 + 1) + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c index 9fe08408db58..9af73567e716 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c @@ -117,7 +117,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) /* disable irqs */ navi10_ih_disable_interrupts(adev); - adev->nbio_funcs->ih_control(adev); + adev->nbio.funcs->ih_control(adev); /* Ring Buffer base. 
[39:8] of 40-bit address of the beginning of the ring buffer*/ WREG32_SOC15(OSSSYS, 0, mmIH_RB_BASE, ih->gpu_addr >> 8); @@ -162,7 +162,7 @@ static int navi10_ih_irq_init(struct amdgpu_device *adev) } WREG32_SOC15(OSSSYS, 0, mmIH_DOORBELL_RPTR, ih_doorbell_rtpr); - adev->nbio_funcs->ih_doorbell_range(adev, ih->use_doorbell, + adev->nbio.funcs->ih_doorbell_range(adev, ih->use_doorbell, ih->doorbell_index); tmp = RREG32_SOC15(OSSSYS, 0, mmIH_STORM_CLIENT_LIST_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c index a56c93620e78..88efaecf9f70 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/navi10_reg_init.c @@ -24,7 +24,6 @@ #include "nv.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "navi10_ip_offset.h" int navi10_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c index cadc7603ca41..a786d159e5e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/navi12_reg_init.c @@ -24,7 +24,6 @@ #include "nv.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "navi12_ip_offset.h" int navi12_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c index 3b5f0f65e096..4ea1e8fbb601 100644 --- a/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/navi14_reg_init.c @@ -24,7 +24,6 @@ #include "nv.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "navi14_ip_offset.h" int navi14_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c index c05d78d4efc6..f3a3fe746222 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.c @@ -27,11 +27,21 @@ #include "nbio/nbio_2_3_default.h" #include "nbio/nbio_2_3_offset.h" #include "nbio/nbio_2_3_sh_mask.h" +#include <uapi/linux/kfd_ioctl.h> #define smnPCIE_CONFIG_CNTL 0x11180044 #define smnCPM_CONTROL 0x11180460 #define smnPCIE_CNTL2 0x11180070 + +static void nbio_v2_3_remap_hdp_registers(struct amdgpu_device *adev) +{ + WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL); + WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL, + adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL); +} + static u32 nbio_v2_3_get_rev_id(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0); @@ -56,10 +66,9 @@ static void nbio_v2_3_hdp_flush(struct amdgpu_device *adev, struct amdgpu_ring *ring) { if (!ring || !ring->funcs->emit_wreg) - WREG32_SOC15_NO_KIQ(NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL, 0); + WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); else - amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET( - NBIO, 0, mmBIF_BX_PF_HDP_MEM_COHERENCY_FLUSH_CNTL), 0); + amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0); } static u32 nbio_v2_3_get_memsize(struct amdgpu_device *adev) @@ -311,7 +320,6 @@ static void nbio_v2_3_init_registers(struct amdgpu_device *adev) } const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { - .hdp_flush_reg = &nbio_v2_3_hdp_flush_reg, .get_hdp_flush_req_offset = nbio_v2_3_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = 
nbio_v2_3_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v2_3_get_pcie_index_offset, @@ -331,4 +339,5 @@ const struct amdgpu_nbio_funcs nbio_v2_3_funcs = { .ih_control = nbio_v2_3_ih_control, .init_registers = nbio_v2_3_init_registers, .detect_hw_virt = nbio_v2_3_detect_hw_virt, + .remap_hdp_registers = nbio_v2_3_remap_hdp_registers, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h index 5ae52085f6b7..a43b60acf7f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v2_3.h @@ -26,6 +26,7 @@ #include "soc15_common.h" +extern const struct nbio_hdp_flush_reg nbio_v2_3_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v2_3_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c index 6590143c3f75..635d9e1fc0a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c @@ -226,7 +226,7 @@ static u32 nbio_v6_1_get_pcie_data_offset(struct amdgpu_device *adev) return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2); } -static const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { +const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = { .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK, .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK, .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK, @@ -277,7 +277,6 @@ static void nbio_v6_1_init_registers(struct amdgpu_device *adev) } const struct amdgpu_nbio_funcs nbio_v6_1_funcs = { - .hdp_flush_reg = &nbio_v6_1_hdp_flush_reg, .get_hdp_flush_req_offset = nbio_v6_1_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v6_1_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v6_1_get_pcie_index_offset, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h index 0743a6f016f3..6dc743b73218 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.h @@ -26,6 +26,7 @@ #include "soc15_common.h" +extern const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v6_1_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c index 74eecb768a82..d6cbf26074bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.c @@ -292,7 +292,6 @@ static void nbio_v7_0_init_registers(struct amdgpu_device *adev) } const struct amdgpu_nbio_funcs nbio_v7_0_funcs = { - .hdp_flush_reg = &nbio_v7_0_hdp_flush_reg, .get_hdp_flush_req_offset = nbio_v7_0_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v7_0_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v7_0_get_pcie_index_offset, diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h index 508d549c5029..e7aefb252550 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_0.h @@ -26,6 +26,7 @@ #include "soc15_common.h" +extern const struct nbio_hdp_flush_reg nbio_v7_0_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v7_0_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index 910fffced43b..0db458f9fafc 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -23,10 +23,12 @@ #include "amdgpu.h" #include "amdgpu_atombios.h" #include "nbio_v7_4.h" +#include "amdgpu_ras.h" #include "nbio/nbio_7_4_offset.h" #include 
"nbio/nbio_7_4_sh_mask.h" #include "nbio/nbio_7_4_0_smn.h" +#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" #include <uapi/linux/kfd_ioctl.h> #define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c @@ -266,7 +268,7 @@ static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev) return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2); } -static const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { +const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = { .ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK, .ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK, .ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK, @@ -306,17 +308,208 @@ static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev) static void nbio_v7_4_init_registers(struct amdgpu_device *adev) { - uint32_t def, data; - def = data = RREG32_PCIE(smnPCIE_CI_CNTL); - data = REG_SET_FIELD(data, PCIE_CI_CNTL, CI_SLV_ORDERING_DIS, 1); +} - if (def != data) - WREG32_PCIE(smnPCIE_CI_CNTL, data); +static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev) +{ + uint32_t bif_doorbell_intr_cntl; + + bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); + if (REG_GET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, + RAS_CNTLR_INTERRUPT_CLEAR, 1); + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + amdgpu_ras_global_ras_isr(adev); + } +} + +static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev) +{ + uint32_t bif_doorbell_intr_cntl; + + bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL); + if (REG_GET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) { + /* driver has to clear the interrupt status when bif ring is disabled */ + bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl, + BIF_DOORBELL_INT_CNTL, + RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1); + WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl); + + amdgpu_ras_global_ras_isr(adev); + } +} + + +static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + /* The ras_controller_irq enablement should be done in psp bl when it + * tries to enable ras feature. Driver only need to set the correct interrupt + * vector for bare-metal and sriov use case respectively + */ + uint32_t bif_intr_cntl; + + bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL); + if (state == AMDGPU_IRQ_STATE_ENABLE) { + /* set interrupt vector select bit to 0 to select + * vetcor 1 for bare metal case */ + bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl, + BIF_INTR_CNTL, + RAS_INTR_VEC_SEL, 0); + WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl); + } + + return 0; +} + +static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for ras_controller_irq should be written + * to BIFring instead of general iv ring. However, due to known bif ring + * hw bug, it has to be disabled. There is no chance the process function + * will be involked. Just left it as a dummy one. 
+ */ + return 0; +} + +static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + /* The ras_controller_irq enablement should be done in psp bl when it + * tries to enable ras feature. Driver only need to set the correct interrupt + * vector for bare-metal and sriov use case respectively + */ + uint32_t bif_intr_cntl; + + bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL); + if (state == AMDGPU_IRQ_STATE_ENABLE) { + /* set interrupt vector select bit to 0 to select + * vetcor 1 for bare metal case */ + bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl, + BIF_INTR_CNTL, + RAS_INTR_VEC_SEL, 0); + WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl); + } + + return 0; +} + +static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + /* By design, the ih cookie for err_event_athub_irq should be written + * to BIFring instead of general iv ring. However, due to known bif ring + * hw bug, it has to be disabled. There is no chance the process function + * will be involked. Just left it as a dummy one. + */ + return 0; +} + +static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = { + .set = nbio_v7_4_set_ras_controller_irq_state, + .process = nbio_v7_4_process_ras_controller_irq, +}; + +static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = { + .set = nbio_v7_4_set_ras_err_event_athub_irq_state, + .process = nbio_v7_4_process_err_event_athub_irq, +}; + +static int nbio_v7_4_init_ras_controller_interrupt (struct amdgpu_device *adev) +{ + int r; + + /* init the irq funcs */ + adev->nbio.ras_controller_irq.funcs = + &nbio_v7_4_ras_controller_irq_funcs; + adev->nbio.ras_controller_irq.num_types = 1; + + /* register ras controller interrupt */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT, + &adev->nbio.ras_controller_irq); + if (r) + return r; + + return 0; +} + +static int nbio_v7_4_init_ras_err_event_athub_interrupt (struct amdgpu_device *adev) +{ + + int r; + + /* init the irq funcs */ + adev->nbio.ras_err_event_athub_irq.funcs = + &nbio_v7_4_ras_err_event_athub_irq_funcs; + adev->nbio.ras_err_event_athub_irq.num_types = 1; + + /* register ras err event athub interrupt */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, + NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT, + &adev->nbio.ras_err_event_athub_irq); + if (r) + return r; + + return 0; +} + +static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + uint32_t global_sts, central_sts, int_eoi; + uint32_t corr, fatal, non_fatal; + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO); + corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr); + fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal); + non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, + ParityErrNonFatal); + + if (corr) + err_data->ce_count++; + if (fatal) + err_data->ue_count++; + + if (corr || fatal || non_fatal) { + central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS); + /* clear error status register */ + WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts); + + if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS, + BIFL_RasContller_Intr_Recv)) { + /* clear interrupt status register */ + 
WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts); + int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI); + int_eoi = REG_SET_FIELD(int_eoi, + IOHC_INTERRUPT_EOI, SMI_EOI, 1); + WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi); + } + } +} + +static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev, + bool enable) +{ + WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL, + DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1); } const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { - .hdp_flush_reg = &nbio_v7_4_hdp_flush_reg, .get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset, .get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset, .get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset, @@ -330,6 +523,7 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture, .enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture, .ih_doorbell_range = nbio_v7_4_ih_doorbell_range, + .enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt, .update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating, .update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep, .get_clockgating_state = nbio_v7_4_get_clockgating_state, @@ -337,4 +531,10 @@ const struct amdgpu_nbio_funcs nbio_v7_4_funcs = { .init_registers = nbio_v7_4_init_registers, .detect_hw_virt = nbio_v7_4_detect_hw_virt, .remap_hdp_registers = nbio_v7_4_remap_hdp_registers, + .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring, + .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring, + .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt, + .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt, + .query_ras_error_count = nbio_v7_4_query_ras_error_count, + .ras_late_init = amdgpu_nbio_ras_late_init, }; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h index c442865bac4f..b1ac82872752 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h @@ -26,6 +26,7 @@ #include "soc15_common.h" +extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg; extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index de9b995b65b1..0ba66bef5746 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -40,12 +40,14 @@ #include "gc/gc_10_1_0_sh_mask.h" #include "hdp/hdp_5_0_0_offset.h" #include "hdp/hdp_5_0_0_sh_mask.h" +#include "smuio/smuio_11_0_0_offset.h" #include "soc15.h" #include "soc15_common.h" #include "gmc_v10_0.h" #include "gfxhub_v2_0.h" #include "mmhub_v2_0.h" +#include "nbio_v2_3.h" #include "nv.h" #include "navi10_ih.h" #include "gfx_v10_0.h" @@ -53,6 +55,7 @@ #include "vcn_v2_0.h" #include "dce_virtual.h" #include "mes_v10_1.h" +#include "mxgpu_nv.h" static const struct amd_ip_funcs nv_common_ip_funcs; @@ -63,8 +66,8 @@ static u32 nv_pcie_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; u32 r; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, reg); @@ -78,8 +81,8 @@ static void nv_pcie_wreg(struct amdgpu_device *adev, u32 
reg, u32 v) { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, reg); @@ -119,7 +122,7 @@ static void nv_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) static u32 nv_get_config_memsize(struct amdgpu_device *adev) { - return adev->nbio_funcs->get_memsize(adev); + return adev->nbio.funcs->get_memsize(adev); } static u32 nv_get_xclk(struct amdgpu_device *adev) @@ -154,8 +157,27 @@ static bool nv_read_disabled_bios(struct amdgpu_device *adev) static bool nv_read_bios_from_rom(struct amdgpu_device *adev, u8 *bios, u32 length_bytes) { - /* TODO: will implement it when SMU header is available */ - return false; + u32 *dw_ptr; + u32 i, length_dw; + + if (bios == NULL) + return false; + if (length_bytes == 0) + return false; + /* APU vbios image is part of sbios image */ + if (adev->flags & AMD_IS_APU) + return false; + + dw_ptr = (u32 *)bios; + length_dw = ALIGN(length_bytes, 4) / 4; + + /* set rom index to 0 */ + WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0); + /* read out the rom data */ + for (i = 0; i < length_dw; i++) + dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA)); + + return true; } static struct soc15_allowed_register_entry nv_allowed_read_registers[] = { @@ -176,6 +198,7 @@ static struct soc15_allowed_register_entry nv_allowed_read_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)}, + { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)}, { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)}, @@ -279,7 +302,7 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - u32 memsize = adev->nbio_funcs->get_memsize(adev); + u32 memsize = adev->nbio.funcs->get_memsize(adev); if (memsize != 0xffffffff) break; @@ -296,7 +319,7 @@ nv_asic_reset_method(struct amdgpu_device *adev) { struct smu_context *smu = &adev->smu; - if (smu_baco_is_support(smu)) + if (!amdgpu_sriov_vf(adev) && smu_baco_is_support(smu)) return AMD_RESET_METHOD_BACO; else return AMD_RESET_METHOD_MODE1; @@ -368,8 +391,8 @@ static void nv_program_aspm(struct amdgpu_device *adev) static void nv_enable_doorbell_aperture(struct amdgpu_device *adev, bool enable) { - adev->nbio_funcs->enable_doorbell_aperture(adev, enable); - adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable); + adev->nbio.funcs->enable_doorbell_aperture(adev, enable); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); } static const struct amdgpu_ip_block_version nv_common_ip_block = @@ -423,9 +446,13 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) if (r) return r; - adev->nbio_funcs = &nbio_v2_3_funcs; + adev->nbio.funcs = &nbio_v2_3_funcs; + adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg; - adev->nbio_funcs->detect_hw_virt(adev); + adev->nbio.funcs->detect_hw_virt(adev); + + if (amdgpu_sriov_vf(adev)) + adev->virt.ops = &xgpu_nv_virt_ops; switch (adev->asic_type) { case CHIP_NAVI10: @@ -435,7 +462,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); if 
(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) + is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -446,7 +473,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev)) + is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); if (adev->enable_mes) @@ -458,7 +485,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block); amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP && - is_support_sw_smu(adev)) + is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); @@ -469,7 +496,7 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block); if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT && - is_support_sw_smu(adev)) + is_support_sw_smu(adev) && !amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block); break; @@ -482,12 +509,12 @@ int nv_set_ip_blocks(struct amdgpu_device *adev) static uint32_t nv_get_rev_id(struct amdgpu_device *adev) { - return adev->nbio_funcs->get_rev_id(adev); + return adev->nbio.funcs->get_rev_id(adev); } static void nv_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - adev->nbio_funcs->hdp_flush(adev, ring); + adev->nbio.funcs->hdp_flush(adev, ring); } static void nv_invalidate_hdp(struct amdgpu_device *adev, @@ -532,6 +559,16 @@ static bool nv_need_reset_on_init(struct amdgpu_device *adev) return false; } +static uint64_t nv_get_pcie_replay_count(struct amdgpu_device *adev) +{ + + /* TODO + * dummy implement for pcie_replay_count sysfs interface + * */ + + return 0; +} + static void nv_init_doorbell_index(struct amdgpu_device *adev) { adev->doorbell_index.kiq = AMDGPU_NAVI10_DOORBELL_KIQ; @@ -579,12 +616,16 @@ static const struct amdgpu_asic_funcs nv_asic_funcs = .need_full_reset = &nv_need_full_reset, .get_pcie_usage = &nv_get_pcie_usage, .need_reset_on_init = &nv_need_reset_on_init, + .get_pcie_replay_count = &nv_get_pcie_replay_count, }; static int nv_common_early_init(void *handle) { +#define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) struct amdgpu_device *adev = (struct amdgpu_device *)handle; + adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; + adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; adev->smc_rreg = NULL; adev->smc_wreg = NULL; adev->pcie_rreg = &nv_pcie_rreg; @@ -667,16 +708,31 @@ static int nv_common_early_init(void *handle) return -EINVAL; } + if (amdgpu_sriov_vf(adev)) { + amdgpu_virt_init_setting(adev); + xgpu_nv_mailbox_set_irq_funcs(adev); + } + return 0; } static int nv_common_late_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + xgpu_nv_mailbox_get_irq(adev); + return 0; } static int 
nv_common_sw_init(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (amdgpu_sriov_vf(adev)) + xgpu_nv_mailbox_add_irq_id(adev); + return 0; } @@ -694,7 +750,13 @@ static int nv_common_hw_init(void *handle) /* enable aspm */ nv_program_aspm(adev); /* setup nbio registers */ - adev->nbio_funcs->init_registers(adev); + adev->nbio.funcs->init_registers(adev); + /* remap HDP registers to a hole in mmio space, + * for the purpose of expose those registers + * to process space + */ + if (adev->nbio.funcs->remap_hdp_registers) + adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ nv_enable_doorbell_aperture(adev, true); @@ -856,9 +918,9 @@ static int nv_common_set_clockgating_state(void *handle, case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: - adev->nbio_funcs->update_medium_grain_clock_gating(adev, + adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); - adev->nbio_funcs->update_medium_grain_light_sleep(adev, + adev->nbio.funcs->update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); nv_update_hdp_mem_power_gating(adev, state == AMD_CG_STATE_GATE ? true : false); @@ -886,7 +948,7 @@ static void nv_common_get_clockgating_state(void *handle, u32 *flags) if (amdgpu_sriov_vf(adev)) *flags = 0; - adev->nbio_funcs->get_clockgating_state(adev, flags); + adev->nbio.funcs->get_clockgating_state(adev, flags); /* AMD_CG_SUPPORT_HDP_MGCG */ tmp = RREG32_SOC15(HDP, 0, mmHDP_CLK_CNTL); diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c index 5d95e614369a..b345e69ba246 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c @@ -40,6 +40,9 @@ MODULE_FIRMWARE("amdgpu/raven_asd.bin"); MODULE_FIRMWARE("amdgpu/picasso_asd.bin"); MODULE_FIRMWARE("amdgpu/raven2_asd.bin"); +MODULE_FIRMWARE("amdgpu/picasso_ta.bin"); +MODULE_FIRMWARE("amdgpu/raven2_ta.bin"); +MODULE_FIRMWARE("amdgpu/raven_ta.bin"); static int psp_v10_0_init_microcode(struct psp_context *psp) { @@ -48,7 +51,7 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) char fw_name[30]; int err = 0; const struct psp_firmware_header_v1_0 *hdr; - + const struct ta_firmware_header_v1_0 *ta_hdr; DRM_DEBUG("\n"); switch (adev->asic_type) { @@ -79,7 +82,45 @@ static int psp_v10_0_init_microcode(struct psp_context *psp) adev->psp.asd_start_addr = (uint8_t *)hdr + le32_to_cpu(hdr->header.ucode_array_offset_bytes); + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); + err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); + if (err) { + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; + dev_info(adev->dev, + "psp v10.0: Failed to load firmware \"%s\"\n", + fw_name); + } else { + err = amdgpu_ucode_validate(adev->psp.ta_fw); + if (err) + goto out2; + + ta_hdr = (const struct ta_firmware_header_v1_0 *) + adev->psp.ta_fw->data; + adev->psp.ta_hdcp_ucode_version = + le32_to_cpu(ta_hdr->ta_hdcp_ucode_version); + adev->psp.ta_hdcp_ucode_size = + le32_to_cpu(ta_hdr->ta_hdcp_size_bytes); + adev->psp.ta_hdcp_start_addr = + (uint8_t *)ta_hdr + + le32_to_cpu(ta_hdr->header.ucode_array_offset_bytes); + + adev->psp.ta_fw_version = le32_to_cpu(ta_hdr->header.ucode_version); + + adev->psp.ta_dtm_ucode_version = + le32_to_cpu(ta_hdr->ta_dtm_ucode_version); + adev->psp.ta_dtm_ucode_size = + le32_to_cpu(ta_hdr->ta_dtm_size_bytes); + adev->psp.ta_dtm_start_addr = + (uint8_t *)adev->psp.ta_hdcp_start_addr + + 
le32_to_cpu(ta_hdr->ta_dtm_offset_bytes); + } + return 0; + +out2: + release_firmware(adev->psp.ta_fw); + adev->psp.ta_fw = NULL; out: if (err) { dev_err(adev->dev, @@ -228,6 +269,7 @@ static int psp_v10_0_cmd_submit(struct psp_context *psp, write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); write_frame->fence_value = index; + amdgpu_asic_flush_hdp(adev, NULL); /* Update the write Pointer in DWORDs */ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index 10166104b8a3..ffeaa2f5588d 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -49,6 +49,7 @@ MODULE_FIRMWARE("amdgpu/navi12_sos.bin"); MODULE_FIRMWARE("amdgpu/navi12_asd.bin"); MODULE_FIRMWARE("amdgpu/arcturus_sos.bin"); MODULE_FIRMWARE("amdgpu/arcturus_asd.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_ta.bin"); /* address block */ #define smnMP1_FIRMWARE_FLAGS 0x3010024 @@ -57,6 +58,8 @@ MODULE_FIRMWARE("amdgpu/arcturus_asd.bin"); #define mmRLC_GPM_UCODE_DATA_NV10 0x5b62 #define mmSDMA0_UCODE_ADDR_NV10 0x5880 #define mmSDMA0_UCODE_DATA_NV10 0x5881 +/* memory training timeout define */ +#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000 static int psp_v11_0_init_microcode(struct psp_context *psp) { @@ -155,6 +158,7 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) switch (adev->asic_type) { case CHIP_VEGA20: + case CHIP_ARCTURUS: snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); if (err) { @@ -182,7 +186,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: - case CHIP_ARCTURUS: break; default: BUG(); @@ -205,18 +208,26 @@ out: return err; } +static bool psp_v11_0_is_sos_alive(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + uint32_t sol_reg; + + sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); + + return sol_reg != 0x0; +} + static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) { int ret; uint32_t psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg; /* Check tOS sign of life register to confirm sys driver and sOS * are already been loaded. */ - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) { + if (psp_v11_0_is_sos_alive(psp)) { psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version); return 0; @@ -233,7 +244,7 @@ static int psp_v11_0_bootloader_load_kdb(struct psp_context *psp) /* Copy PSP KDB binary to memory */ memcpy(psp->fw_pri_buf, psp->kdb_start_addr, psp->kdb_bin_size); - /* Provide the sys driver to bootloader */ + /* Provide the PSP KDB to bootloader */ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, (uint32_t)(psp->fw_pri_mc_addr >> 20)); psp_gfxdrv_command_reg = PSP_BL__LOAD_KEY_DATABASE; @@ -252,13 +263,11 @@ static int psp_v11_0_bootloader_load_sysdrv(struct psp_context *psp) int ret; uint32_t psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg; /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. 
*/ - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) { + if (psp_v11_0_is_sos_alive(psp)) { psp->sos_fw_version = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_58); dev_info(adev->dev, "sos fw version = 0x%x.\n", psp->sos_fw_version); return 0; @@ -296,13 +305,11 @@ static int psp_v11_0_bootloader_load_sos(struct psp_context *psp) int ret; unsigned int psp_gfxdrv_command_reg = 0; struct amdgpu_device *adev = psp->adev; - uint32_t sol_reg; /* Check sOS sign of life register to confirm sys driver and sOS * are already been loaded. */ - sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); - if (sol_reg) + if (psp_v11_0_is_sos_alive(psp)) return 0; /* Wait for bootloader to signify that is ready having bit 31 of C2PMSG_35 set to 1 */ @@ -398,6 +405,34 @@ static bool psp_v11_0_support_vmr_ring(struct psp_context *psp) return false; } +static int psp_v11_0_ring_stop(struct psp_context *psp, + enum psp_ring_type ring_type) +{ + int ret = 0; + struct amdgpu_device *adev = psp->adev; + + /* Write the ring destroy command*/ + if (psp_v11_0_support_vmr_ring(psp)) + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, + GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); + else + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, + GFX_CTRL_CMD_ID_DESTROY_RINGS); + + /* there might be handshake issue with hardware which needs delay */ + mdelay(20); + + /* Wait for response flag (bit 31) */ + if (psp_v11_0_support_vmr_ring(psp)) + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), + 0x80000000, 0x80000000, false); + else + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + + return ret; +} + static int psp_v11_0_ring_create(struct psp_context *psp, enum psp_ring_type ring_type) { @@ -407,6 +442,12 @@ static int psp_v11_0_ring_create(struct psp_context *psp, struct amdgpu_device *adev = psp->adev; if (psp_v11_0_support_vmr_ring(psp)) { + ret = psp_v11_0_ring_stop(psp, ring_type); + if (ret) { + DRM_ERROR("psp_v11_0_ring_stop_sriov failed!\n"); + return ret; + } + /* Write low address of the ring to C2PMSG_102 */ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_102, psp_ring_reg); @@ -426,6 +467,14 @@ static int psp_v11_0_ring_create(struct psp_context *psp, 0x80000000, 0x8000FFFF, false); } else { + /* Wait for sOS ready for ring creation */ + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_64), + 0x80000000, 0x80000000, false); + if (ret) { + DRM_ERROR("Failed to wait for sOS ready for ring creation\n"); + return ret; + } + /* Write low address of the ring to C2PMSG_69 */ psp_ring_reg = lower_32_bits(ring->ring_mem_mc_addr); WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_69, psp_ring_reg); @@ -451,33 +500,6 @@ static int psp_v11_0_ring_create(struct psp_context *psp, return ret; } -static int psp_v11_0_ring_stop(struct psp_context *psp, - enum psp_ring_type ring_type) -{ - int ret = 0; - struct amdgpu_device *adev = psp->adev; - - /* Write the ring destroy command*/ - if (psp_v11_0_support_vmr_ring(psp)) - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_101, - GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING); - else - WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_64, - GFX_CTRL_CMD_ID_DESTROY_RINGS); - - /* there might be handshake issue with hardware which needs delay */ - mdelay(20); - - /* Wait for response flag (bit 31) */ - if (psp_v11_0_support_vmr_ring(psp)) - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_101), - 0x80000000, 0x80000000, false); - else - ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, 
mmMP0_SMN_C2PMSG_64), - 0x80000000, 0x80000000, false); - - return ret; -} static int psp_v11_0_ring_destroy(struct psp_context *psp, enum psp_ring_type ring_type) @@ -541,6 +563,7 @@ static int psp_v11_0_cmd_submit(struct psp_context *psp, write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); write_frame->fence_value = index; + amdgpu_asic_flush_hdp(adev, NULL); /* Update the write Pointer in DWORDs */ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; @@ -889,6 +912,162 @@ static int psp_v11_0_rlc_autoload_start(struct psp_context *psp) return psp_rlc_autoload_start(psp); } +static int psp_v11_0_memory_training_send_msg(struct psp_context *psp, int msg) +{ + int ret; + int i; + uint32_t data_32; + int max_wait; + struct amdgpu_device *adev = psp->adev; + + data_32 = (psp->mem_train_ctx.c2p_train_data_offset >> 20); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, data_32); + WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, msg); + + max_wait = MEM_TRAIN_SEND_MSG_TIMEOUT_US / adev->usec_timeout; + for (i = 0; i < max_wait; i++) { + ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35), + 0x80000000, 0x80000000, false); + if (ret == 0) + break; + } + if (i < max_wait) + ret = 0; + else + ret = -ETIME; + + DRM_DEBUG("training %s %s, cost %d @ %d ms\n", + (msg == PSP_BL__DRAM_SHORT_TRAIN) ? "short" : "long", + (ret == 0) ? "succeed" : "failed", + i, adev->usec_timeout/1000); + return ret; +} + +static void psp_v11_0_memory_training_fini(struct psp_context *psp) +{ + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + + ctx->init = PSP_MEM_TRAIN_NOT_SUPPORT; + kfree(ctx->sys_cache); + ctx->sys_cache = NULL; +} + +static int psp_v11_0_memory_training_init(struct psp_context *psp) +{ + int ret; + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + + if (ctx->init != PSP_MEM_TRAIN_RESERVE_SUCCESS) { + DRM_DEBUG("memory training is not supported!\n"); + return 0; + } + + ctx->sys_cache = kzalloc(ctx->train_data_size, GFP_KERNEL); + if (ctx->sys_cache == NULL) { + DRM_ERROR("alloc mem_train_ctx.sys_cache failed!\n"); + ret = -ENOMEM; + goto Err_out; + } + + DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n", + ctx->train_data_size, + ctx->p2c_train_data_offset, + ctx->c2p_train_data_offset); + ctx->init = PSP_MEM_TRAIN_INIT_SUCCESS; + return 0; + +Err_out: + psp_v11_0_memory_training_fini(psp); + return ret; +} + +/* + * save and restore proces + */ +static int psp_v11_0_memory_training(struct psp_context *psp, uint32_t ops) +{ + int ret; + uint32_t p2c_header[4]; + struct psp_memory_training_context *ctx = &psp->mem_train_ctx; + uint32_t *pcache = (uint32_t*)ctx->sys_cache; + + if (ctx->init == PSP_MEM_TRAIN_NOT_SUPPORT) { + DRM_DEBUG("Memory training is not supported.\n"); + return 0; + } else if (ctx->init != PSP_MEM_TRAIN_INIT_SUCCESS) { + DRM_ERROR("Memory training initialization failure.\n"); + return -EINVAL; + } + + if (psp_v11_0_is_sos_alive(psp)) { + DRM_DEBUG("SOS is alive, skip memory training.\n"); + return 0; + } + + amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, p2c_header, sizeof(p2c_header), false); + DRM_DEBUG("sys_cache[%08x,%08x,%08x,%08x] p2c_header[%08x,%08x,%08x,%08x]\n", + pcache[0], pcache[1], pcache[2], pcache[3], + p2c_header[0], p2c_header[1], p2c_header[2], p2c_header[3]); + + if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) { + DRM_DEBUG("Short training depends on restore.\n"); + ops |= 
PSP_MEM_TRAIN_RESTORE; + } + + if ((ops & PSP_MEM_TRAIN_RESTORE) && + pcache[0] != MEM_TRAIN_SYSTEM_SIGNATURE) { + DRM_DEBUG("sys_cache[0] is invalid, restore depends on save.\n"); + ops |= PSP_MEM_TRAIN_SAVE; + } + + if (p2c_header[0] == MEM_TRAIN_SYSTEM_SIGNATURE && + !(pcache[0] == MEM_TRAIN_SYSTEM_SIGNATURE && + pcache[3] == p2c_header[3])) { + DRM_DEBUG("sys_cache is invalid or out-of-date, need save training data to sys_cache.\n"); + ops |= PSP_MEM_TRAIN_SAVE; + } + + if ((ops & PSP_MEM_TRAIN_SAVE) && + p2c_header[0] != MEM_TRAIN_SYSTEM_SIGNATURE) { + DRM_DEBUG("p2c_header[0] is invalid, save depends on long training.\n"); + ops |= PSP_MEM_TRAIN_SEND_LONG_MSG; + } + + if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { + ops &= ~PSP_MEM_TRAIN_SEND_SHORT_MSG; + ops |= PSP_MEM_TRAIN_SAVE; + } + + DRM_DEBUG("Memory training ops:%x.\n", ops); + + if (ops & PSP_MEM_TRAIN_SEND_LONG_MSG) { + ret = psp_v11_0_memory_training_send_msg(psp, PSP_BL__DRAM_LONG_TRAIN); + if (ret) { + DRM_ERROR("Send long training msg failed.\n"); + return ret; + } + } + + if (ops & PSP_MEM_TRAIN_SAVE) { + amdgpu_device_vram_access(psp->adev, ctx->p2c_train_data_offset, ctx->sys_cache, ctx->train_data_size, false); + } + + if (ops & PSP_MEM_TRAIN_RESTORE) { + amdgpu_device_vram_access(psp->adev, ctx->c2p_train_data_offset, ctx->sys_cache, ctx->train_data_size, true); + } + + if (ops & PSP_MEM_TRAIN_SEND_SHORT_MSG) { + ret = psp_v11_0_memory_training_send_msg(psp, (amdgpu_force_long_training > 0) ? + PSP_BL__DRAM_LONG_TRAIN : PSP_BL__DRAM_SHORT_TRAIN); + if (ret) { + DRM_ERROR("send training msg failed.\n"); + return ret; + } + } + ctx->training_cnt++; + return 0; +} + static const struct psp_funcs psp_v11_0_funcs = { .init_microcode = psp_v11_0_init_microcode, .bootloader_load_kdb = psp_v11_0_bootloader_load_kdb, @@ -909,6 +1088,9 @@ static const struct psp_funcs psp_v11_0_funcs = { .ras_trigger_error = psp_v11_0_ras_trigger_error, .ras_cure_posion = psp_v11_0_ras_cure_posion, .rlc_autoload_start = psp_v11_0_rlc_autoload_start, + .mem_training_init = psp_v11_0_memory_training_init, + .mem_training_fini = psp_v11_0_memory_training_fini, + .mem_training = psp_v11_0_memory_training, }; void psp_v11_0_set_psp_funcs(struct psp_context *psp) diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c index c72e43f8e0be..8f553f6f92d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v12_0.c @@ -378,6 +378,7 @@ static int psp_v12_0_cmd_submit(struct psp_context *psp, write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); write_frame->fence_value = index; + amdgpu_asic_flush_hdp(adev, NULL); /* Update the write Pointer in DWORDs */ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index d2c727f6a8bd..fdc00938327b 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -454,6 +454,7 @@ static int psp_v3_1_cmd_submit(struct psp_context *psp, write_frame->fence_addr_hi = upper_32_bits(fence_mc_addr); write_frame->fence_addr_lo = lower_32_bits(fence_mc_addr); write_frame->fence_value = index; + amdgpu_asic_flush_hdp(adev, NULL); /* Update the write Pointer in DWORDs */ psp_write_ptr_reg = (psp_write_ptr_reg + rb_frame_size_dw) % ring_size_dw; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 
4554e72c8378..4ef4d31f5231 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -747,13 +747,13 @@ static void sdma_v4_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 ref_and_mask = 0; - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0 << ring->me; sdma_v4_0_wait_reg_mem(ring, 0, 1, - adev->nbio_funcs->get_hdp_flush_done_offset(adev), - adev->nbio_funcs->get_hdp_flush_req_offset(adev), + adev->nbio.funcs->get_hdp_flush_done_offset(adev), + adev->nbio.funcs->get_hdp_flush_req_offset(adev), ref_and_mask, ref_and_mask, 10); } @@ -1691,102 +1691,17 @@ static int sdma_v4_0_early_init(void *handle) } static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, + void *err_data, struct amdgpu_iv_entry *entry); static int sdma_v4_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct ras_common_if **ras_if = &adev->sdma.ras_if; struct ras_ih_if ih_info = { .cb = sdma_v4_0_process_ras_data_cb, }; - struct ras_fs_if fs_info = { - .sysfs_name = "sdma_err_count", - .debugfs_name = "sdma_err_inject", - }; - struct ras_common_if ras_block = { - .block = AMDGPU_RAS_BLOCK__SDMA, - .type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE, - .sub_block_index = 0, - .name = "sdma", - }; - int r, i; - - if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) { - amdgpu_ras_feature_enable_on_boot(adev, &ras_block, 0); - return 0; - } - - /* handle resume path. */ - if (*ras_if) { - /* resend ras TA enable cmd during resume. - * prepare to handle failure. - */ - ih_info.head = **ras_if; - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - /* request a gpu reset. will run again. */ - amdgpu_ras_request_reset_on_boot(adev, - AMDGPU_RAS_BLOCK__SDMA); - return 0; - } - /* fail to enable ras, cleanup all. */ - goto irq; - } - /* enable successfully. continue. 
*/ - goto resume; - } - - *ras_if = kmalloc(sizeof(**ras_if), GFP_KERNEL); - if (!*ras_if) - return -ENOMEM; - - **ras_if = ras_block; - - r = amdgpu_ras_feature_enable_on_boot(adev, *ras_if, 1); - if (r) { - if (r == -EAGAIN) { - amdgpu_ras_request_reset_on_boot(adev, - AMDGPU_RAS_BLOCK__SDMA); - r = 0; - } - goto feature; - } - ih_info.head = **ras_if; - fs_info.head = **ras_if; - - r = amdgpu_ras_interrupt_add_handler(adev, &ih_info); - if (r) - goto interrupt; - - amdgpu_ras_debugfs_create(adev, &fs_info); - - r = amdgpu_ras_sysfs_create(adev, &fs_info); - if (r) - goto sysfs; -resume: - for (i = 0; i < adev->sdma.num_instances; i++) { - r = amdgpu_irq_get(adev, &adev->sdma.ecc_irq, - AMDGPU_SDMA_IRQ_INSTANCE0 + i); - if (r) - goto irq; - } - - return 0; -irq: - amdgpu_ras_sysfs_remove(adev, *ras_if); -sysfs: - amdgpu_ras_debugfs_remove(adev, *ras_if); - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); -interrupt: - amdgpu_ras_feature_enable(adev, *ras_if, 0); -feature: - kfree(*ras_if); - *ras_if = NULL; - return r; + return amdgpu_sdma_ras_late_init(adev, &ih_info); } static int sdma_v4_0_sw_init(void *handle) @@ -1858,21 +1773,7 @@ static int sdma_v4_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA) && - adev->sdma.ras_if) { - struct ras_common_if *ras_if = adev->sdma.ras_if; - struct ras_ih_if ih_info = { - .head = *ras_if, - }; - - /*remove fs first*/ - amdgpu_ras_debugfs_remove(adev, ras_if); - amdgpu_ras_sysfs_remove(adev, ras_if); - /*remove the IH*/ - amdgpu_ras_interrupt_remove_handler(adev, &ih_info); - amdgpu_ras_feature_enable(adev, ras_if, 0); - kfree(ras_if); - } + amdgpu_sdma_ras_fini(adev); for (i = 0; i < adev->sdma.num_instances; i++) { amdgpu_ring_fini(&adev->sdma.instance[i].ring); @@ -1892,7 +1793,7 @@ static int sdma_v4_0_hw_init(void *handle) if ((adev->asic_type == CHIP_RAVEN && adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_powergating_by_smu) || - adev->asic_type == CHIP_RENOIR) + (adev->asic_type == CHIP_RENOIR && !adev->in_gpu_reset)) amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_SDMA, false); if (!amdgpu_sriov_vf(adev)) @@ -2025,52 +1926,28 @@ static int sdma_v4_0_process_trap_irq(struct amdgpu_device *adev, } static int sdma_v4_0_process_ras_data_cb(struct amdgpu_device *adev, - struct ras_err_data *err_data, + void *err_data, struct amdgpu_iv_entry *entry) { - uint32_t err_source; int instance; + /* When “Full RAS” is enabled, the per-IP interrupt sources should + * be disabled and the driver should only look for the aggregated + * interrupt via sync flood + */ + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) + goto out; + instance = sdma_v4_0_irq_id_to_seq(entry->client_id); if (instance < 0) - return 0; - - switch (entry->src_id) { - case SDMA0_4_0__SRCID__SDMA_SRAM_ECC: - err_source = 0; - break; - case SDMA0_4_0__SRCID__SDMA_ECC: - err_source = 1; - break; - default: - return 0; - } - - kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); + goto out; - amdgpu_ras_reset_gpu(adev, 0); + amdgpu_sdma_process_ras_data_cb(adev, err_data, entry); +out: return AMDGPU_RAS_SUCCESS; } -static int sdma_v4_0_process_ecc_irq(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - struct ras_common_if *ras_if = adev->sdma.ras_if; - struct ras_dispatch_if ih_data = { - .entry = entry, - }; - - if (!ras_if) - return 0; - - ih_data.head = *ras_if; - - amdgpu_ras_interrupt_dispatch(adev, &ih_data); - 
return 0; -} - static int sdma_v4_0_process_illegal_inst_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) @@ -2418,7 +2295,7 @@ static const struct amdgpu_irq_src_funcs sdma_v4_0_illegal_inst_irq_funcs = { static const struct amdgpu_irq_src_funcs sdma_v4_0_ecc_irq_funcs = { .set = sdma_v4_0_set_ecc_irq_state, - .process = sdma_v4_0_process_ecc_irq, + .process = amdgpu_sdma_process_ecc_irq, }; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c index 8493bfbbc148..f4ad2990f973 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c @@ -406,7 +406,7 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { struct amdgpu_device *adev = ring->adev; u32 ref_and_mask = 0; - const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio_funcs->hdp_flush_reg; + const struct nbio_hdp_flush_reg *nbio_hf_reg = adev->nbio.hdp_flush_reg; if (ring->me == 0) ref_and_mask = nbio_hf_reg->ref_and_mask_sdma0; @@ -416,8 +416,8 @@ static void sdma_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_POLL_REGMEM) | SDMA_PKT_POLL_REGMEM_HEADER_HDP_FLUSH(1) | SDMA_PKT_POLL_REGMEM_HEADER_FUNC(3)); /* == */ - amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_done_offset(adev)) << 2); - amdgpu_ring_write(ring, (adev->nbio_funcs->get_hdp_flush_req_offset(adev)) << 2); + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_done_offset(adev)) << 2); + amdgpu_ring_write(ring, (adev->nbio.funcs->get_hdp_flush_req_offset(adev)) << 2); amdgpu_ring_write(ring, ref_and_mask); /* reference */ amdgpu_ring_write(ring, ref_and_mask); /* mask */ amdgpu_ring_write(ring, SDMA_PKT_POLL_REGMEM_DW5_RETRY_COUNT(0xfff) | @@ -683,7 +683,7 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev) WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL), doorbell); WREG32(sdma_v5_0_get_reg_offset(adev, i, mmSDMA0_GFX_DOORBELL_OFFSET), doorbell_offset); - adev->nbio_funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index, 20); if (amdgpu_sriov_vf(adev)) diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c index 493af42152f2..29024e64c886 100644 --- a/drivers/gpu/drm/amd/amdgpu/si.c +++ b/drivers/gpu/drm/amd/amdgpu/si.c @@ -975,6 +975,17 @@ static void si_smc_wreg(struct amdgpu_device *adev, u32 reg, u32 v) static struct amdgpu_allowed_register_entry si_allowed_read_registers[] = { {GRBM_STATUS}, + {mmGRBM_STATUS2}, + {mmGRBM_STATUS_SE0}, + {mmGRBM_STATUS_SE1}, + {mmSRBM_STATUS}, + {mmSRBM_STATUS2}, + {DMA_STATUS_REG + DMA0_REGISTER_OFFSET}, + {DMA_STATUS_REG + DMA1_REGISTER_OFFSET}, + {mmCP_STAT}, + {mmCP_STALLED_STAT1}, + {mmCP_STALLED_STAT2}, + {mmCP_STALLED_STAT3}, {GB_ADDR_CONFIG}, {MC_ARB_RAMCFG}, {GB_TILE_MODE0}, diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c index 57bb5f9e08b2..88ae27a5a03d 100644 --- a/drivers/gpu/drm/amd/amdgpu/si_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c @@ -64,7 +64,8 @@ static int si_ih_irq_init(struct amdgpu_device *adev) u32 interrupt_cntl, ih_cntl, ih_rb_cntl; si_ih_disable_interrupts(adev); - WREG32(INTERRUPT_CNTL2, adev->irq.ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, adev->dummy_page_addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE; interrupt_cntl &= 
~IH_REQ_NONSNOOP_EN; diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 4ccfcdf8f16a..8e1640bc07af 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -58,6 +58,9 @@ #include "mmhub_v1_0.h" #include "df_v1_7.h" #include "df_v3_6.h" +#include "nbio_v6_1.h" +#include "nbio_v7_0.h" +#include "nbio_v7_4.h" #include "vega10_ih.h" #include "sdma_v4_0.h" #include "uvd_v7_0.h" @@ -91,8 +94,8 @@ static u32 soc15_pcie_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; u32 r; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, reg); @@ -106,8 +109,8 @@ static void soc15_pcie_wreg(struct amdgpu_device *adev, u32 reg, u32 v) { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); WREG32(address, reg); @@ -121,8 +124,8 @@ static u64 soc15_pcie_rreg64(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; u64 r; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); /* read low 32 bit */ @@ -142,8 +145,8 @@ static void soc15_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) { unsigned long flags, address, data; - address = adev->nbio_funcs->get_pcie_index_offset(adev); - data = adev->nbio_funcs->get_pcie_data_offset(adev); + address = adev->nbio.funcs->get_pcie_index_offset(adev); + data = adev->nbio.funcs->get_pcie_data_offset(adev); spin_lock_irqsave(&adev->pcie_idx_lock, flags); /* write low 32 bit */ @@ -262,7 +265,7 @@ static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) static u32 soc15_get_config_memsize(struct amdgpu_device *adev) { - return adev->nbio_funcs->get_memsize(adev); + return adev->nbio.funcs->get_memsize(adev); } static u32 soc15_get_xclk(struct amdgpu_device *adev) @@ -336,6 +339,7 @@ static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)}, + { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)}, { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)}, { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)}, @@ -461,7 +465,7 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - u32 memsize = adev->nbio_funcs->get_memsize(adev); + u32 memsize = adev->nbio.funcs->get_memsize(adev); if (memsize != 0xffffffff) break; @@ -475,42 +479,66 @@ static int soc15_asic_mode1_reset(struct amdgpu_device *adev) static int soc15_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap) { - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + if (is_support_sw_smu(adev)) { + struct smu_context *smu = 
&adev->smu; - if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { - *cap = false; - return -ENOENT; - } + *cap = smu_baco_is_support(smu); + return 0; + } else { + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { + *cap = false; + return -ENOENT; + } - return pp_funcs->get_asic_baco_capability(pp_handle, cap); + return pp_funcs->get_asic_baco_capability(pp_handle, cap); + } } static int soc15_asic_baco_reset(struct amdgpu_device *adev) { - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) - return -ENOENT; + /* avoid NBIF got stuck when do RAS recovery in BACO reset */ + if (ras && ras->supported) + adev->nbio.funcs->enable_doorbell_interrupt(adev, false); - /* enter BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 1)) - return -EIO; + dev_info(adev->dev, "GPU BACO reset\n"); - /* exit BACO state */ - if (pp_funcs->set_asic_baco_state(pp_handle, 0)) - return -EIO; + if (is_support_sw_smu(adev)) { + struct smu_context *smu = &adev->smu; - dev_info(adev->dev, "GPU BACO reset\n"); + if (smu_baco_reset(smu)) + return -EIO; + } else { + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - adev->in_baco_reset = 1; + if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) + return -ENOENT; + + /* enter BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 1)) + return -EIO; + + /* exit BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 0)) + return -EIO; + } + + /* re-enable doorbell interrupt after BACO exit */ + if (ras && ras->supported) + adev->nbio.funcs->enable_doorbell_interrupt(adev, true); return 0; } static int soc15_mode2_reset(struct amdgpu_device *adev) { + if (is_support_sw_smu(adev)) + return smu_mode2_reset(&adev->smu); if (!adev->powerplay.pp_funcs || !adev->powerplay.pp_funcs->asic_reset_mode_2) return -ENOENT; @@ -525,6 +553,7 @@ soc15_asic_reset_method(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_RAVEN: + case CHIP_RENOIR: return AMD_RESET_METHOD_MODE2; case CHIP_VEGA10: case CHIP_VEGA12: @@ -626,8 +655,8 @@ static void soc15_program_aspm(struct amdgpu_device *adev) static void soc15_enable_doorbell_aperture(struct amdgpu_device *adev, bool enable) { - adev->nbio_funcs->enable_doorbell_aperture(adev, enable); - adev->nbio_funcs->enable_doorbell_selfring_aperture(adev, enable); + adev->nbio.funcs->enable_doorbell_aperture(adev, enable); + adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, enable); } static const struct amdgpu_ip_block_version vega10_common_ip_block = @@ -641,7 +670,7 @@ static const struct amdgpu_ip_block_version vega10_common_ip_block = static uint32_t soc15_get_rev_id(struct amdgpu_device *adev) { - return adev->nbio_funcs->get_rev_id(adev); + return adev->nbio.funcs->get_rev_id(adev); } int soc15_set_ip_blocks(struct amdgpu_device *adev) @@ -667,13 +696,17 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) adev->gmc.xgmi.supported = true; - if (adev->flags & AMD_IS_APU) - adev->nbio_funcs = &nbio_v7_0_funcs; - else if (adev->asic_type == CHIP_VEGA20 || - adev->asic_type == CHIP_ARCTURUS) - adev->nbio_funcs = 
&nbio_v7_4_funcs; - else - adev->nbio_funcs = &nbio_v6_1_funcs; + if (adev->flags & AMD_IS_APU) { + adev->nbio.funcs = &nbio_v7_0_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg; + } else if (adev->asic_type == CHIP_VEGA20 || + adev->asic_type == CHIP_ARCTURUS) { + adev->nbio.funcs = &nbio_v7_4_funcs; + adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg; + } else { + adev->nbio.funcs = &nbio_v6_1_funcs; + adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg; + } if (adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS) adev->df_funcs = &df_v3_6_funcs; @@ -681,7 +714,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) adev->df_funcs = &df_v1_7_funcs; adev->rev_id = soc15_get_rev_id(adev); - adev->nbio_funcs->detect_hw_virt(adev); + adev->nbio.funcs->detect_hw_virt(adev); if (amdgpu_sriov_vf(adev)) adev->virt.ops = &xgpu_ai_virt_ops; @@ -750,13 +783,26 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) case CHIP_ARCTURUS: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block); - amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + + if (amdgpu_sriov_vf(adev)) { + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + } else { + amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block); + if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) + amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block); + } + if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block); amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block); amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block); - amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); - amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); + if (!amdgpu_sriov_vf(adev)) + amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block); + + if (unlikely(adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT)) + amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block); break; case CHIP_RENOIR: amdgpu_device_ip_block_add(adev, &vega10_common_ip_block); @@ -785,7 +831,7 @@ int soc15_set_ip_blocks(struct amdgpu_device *adev) static void soc15_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring) { - adev->nbio_funcs->hdp_flush(adev, ring); + adev->nbio.funcs->hdp_flush(adev, ring); } static void soc15_invalidate_hdp(struct amdgpu_device *adev, @@ -1099,7 +1145,9 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; + adev->pg_flags = AMD_PG_SUPPORT_SDMA | + AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG; } else if (adev->pdev->device == 0x15d8) { adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | @@ -1142,7 +1190,9 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_VCN_MGCG; - adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; + adev->pg_flags = AMD_PG_SUPPORT_SDMA | + AMD_PG_SUPPORT_VCN | + AMD_PG_SUPPORT_VCN_DPG; } break; case CHIP_ARCTURUS: @@ -1157,7 +1207,8 @@ static int soc15_common_early_init(void *handle) AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS | AMD_CG_SUPPORT_MC_MGCG | - AMD_CG_SUPPORT_MC_LS; + AMD_CG_SUPPORT_MC_LS | + AMD_CG_SUPPORT_IH_CG; adev->pg_flags = 0; adev->external_rev_id = adev->rev_id + 0x32; break; @@ -1203,11 +1254,15 @@ static int soc15_common_early_init(void *handle) static int 
soc15_common_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int r = 0; if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); - return 0; + if (adev->nbio.funcs->ras_late_init) + r = adev->nbio.funcs->ras_late_init(adev); + + return r; } static int soc15_common_sw_init(void *handle) @@ -1224,6 +1279,10 @@ static int soc15_common_sw_init(void *handle) static int soc15_common_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_nbio_ras_fini(adev); + adev->df_funcs->sw_fini(adev); return 0; } @@ -1236,12 +1295,12 @@ static void soc15_doorbell_range_init(struct amdgpu_device *adev) if (!amdgpu_sriov_vf(adev)) { for (i = 0; i < adev->sdma.num_instances; i++) { ring = &adev->sdma.instance[i].ring; - adev->nbio_funcs->sdma_doorbell_range(adev, i, + adev->nbio.funcs->sdma_doorbell_range(adev, i, ring->use_doorbell, ring->doorbell_index, adev->doorbell_index.sdma_doorbell_range); } - adev->nbio_funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, + adev->nbio.funcs->ih_doorbell_range(adev, adev->irq.ih.use_doorbell, adev->irq.ih.doorbell_index); } } @@ -1255,13 +1314,13 @@ static int soc15_common_hw_init(void *handle) /* enable aspm */ soc15_program_aspm(adev); /* setup nbio registers */ - adev->nbio_funcs->init_registers(adev); + adev->nbio.funcs->init_registers(adev); /* remap HDP registers to a hole in mmio space, * for the purpose of expose those registers * to process space */ - if (adev->nbio_funcs->remap_hdp_registers) - adev->nbio_funcs->remap_hdp_registers(adev); + if (adev->nbio.funcs->remap_hdp_registers) + adev->nbio.funcs->remap_hdp_registers(adev); /* enable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, true); @@ -1284,6 +1343,14 @@ static int soc15_common_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_put_irq(adev); + if (adev->nbio.ras_if && + amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { + if (adev->nbio.funcs->init_ras_controller_interrupt) + amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0); + if (adev->nbio.funcs->init_ras_err_event_athub_interrupt) + amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0); + } + return 0; } @@ -1424,9 +1491,9 @@ static int soc15_common_set_clockgating_state(void *handle, case CHIP_VEGA10: case CHIP_VEGA12: case CHIP_VEGA20: - adev->nbio_funcs->update_medium_grain_clock_gating(adev, + adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); - adev->nbio_funcs->update_medium_grain_light_sleep(adev, + adev->nbio.funcs->update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); soc15_update_hdp_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); @@ -1441,9 +1508,9 @@ static int soc15_common_set_clockgating_state(void *handle, break; case CHIP_RAVEN: case CHIP_RENOIR: - adev->nbio_funcs->update_medium_grain_clock_gating(adev, + adev->nbio.funcs->update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE ? true : false); - adev->nbio_funcs->update_medium_grain_light_sleep(adev, + adev->nbio.funcs->update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE ? true : false); soc15_update_hdp_light_sleep(adev, state == AMD_CG_STATE_GATE ? 
true : false); @@ -1472,7 +1539,7 @@ static void soc15_common_get_clockgating_state(void *handle, u32 *flags) if (amdgpu_sriov_vf(adev)) *flags = 0; - adev->nbio_funcs->get_clockgating_state(adev, flags); + adev->nbio.funcs->get_clockgating_state(adev, flags); /* AMD_CG_SUPPORT_HDP_LS */ data = RREG32(SOC15_REG_OFFSET(HDP, 0, mmHDP_MEM_POWER_LS)); diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h index a3dde0c31f57..57af489a5de3 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15.h @@ -28,8 +28,8 @@ #include "nbio_v7_0.h" #include "nbio_v7_4.h" -#define SOC15_FLUSH_GPU_TLB_NUM_WREG 4 -#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 1 +#define SOC15_FLUSH_GPU_TLB_NUM_WREG 6 +#define SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT 3 extern const struct amd_ip_funcs soc15_common_ip_funcs; @@ -67,6 +67,8 @@ struct soc15_allowed_register_entry { #define SOC15_REG_GOLDEN_VALUE(ip, inst, reg, and_mask, or_mask) \ { ip##_HWIP, inst, reg##_BASE_IDX, reg, and_mask, or_mask } +#define SOC15_REG_FIELD(reg, field) reg##__##field##_MASK, reg##__##field##__SHIFT + void soc15_grbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); int soc15_set_ip_blocks(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c new file mode 100644 index 000000000000..0d6b50528d76 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.c @@ -0,0 +1,37 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "umc_v6_0.h" +#include "amdgpu.h" + +static void umc_v6_0_init_registers(struct amdgpu_device *adev) +{ + unsigned i,j; + + for (i = 0; i < 4; i++) + for (j = 0; j < 4; j++) + WREG32((i*0x100000 + 0x5010c + j*0x2000)/4, 0x1002); +} + +const struct amdgpu_umc_funcs umc_v6_0_funcs = { + .init_registers = umc_v6_0_init_registers, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h new file mode 100644 index 000000000000..109f1a57a46e --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_0.h @@ -0,0 +1,31 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __UMC_V6_0_H__ +#define __UMC_V6_0_H__ + +#include "soc15_common.h" +#include "amdgpu.h" + +extern const struct amdgpu_umc_funcs umc_v6_0_funcs; + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 8502e736f721..47c4b96b14d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -75,6 +75,17 @@ static void umc_v6_1_disable_umc_index_mode(struct amdgpu_device *adev) RSMU_UMC_INDEX_MODE_EN, 0); } +static uint32_t umc_v6_1_get_umc_inst(struct amdgpu_device *adev) +{ + uint32_t rsmu_umc_index; + + rsmu_umc_index = RREG32_SOC15(RSMU, 0, + mmRSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU); + return REG_GET_FIELD(rsmu_umc_index, + RSMU_UMC_INDEX_REGISTER_NBIF_VG20_GPU, + RSMU_UMC_INDEX_INSTANCE); +} + static void umc_v6_1_query_correctable_error_count(struct amdgpu_device *adev, uint32_t umc_reg_offset, unsigned long *error_count) @@ -165,7 +176,8 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, uint32_t umc_reg_offset, uint32_t channel_index) { uint32_t lsb, mc_umc_status_addr; - uint64_t mc_umc_status, err_addr; + uint64_t mc_umc_status, err_addr, retired_page; + struct eeprom_table_record *err_rec; mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_STATUST0); @@ -177,6 +189,7 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, return; } + err_rec = &err_data->err_addr[err_data->err_addr_cnt]; mc_umc_status = RREG64_UMC(mc_umc_status_addr + umc_reg_offset); /* calculate error address if ue/ce error is detected */ @@ -191,12 +204,24 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, err_addr &= ~((0x1ULL << lsb) - 1); /* translate umc channel address to soc pa, 3 parts are included */ - err_data->err_addr[err_data->err_addr_cnt] = - ADDR_OF_8KB_BLOCK(err_addr) | - ADDR_OF_256B_BLOCK(channel_index) | - OFFSET_IN_256B_BLOCK(err_addr); - - err_data->err_addr_cnt++; + retired_page = ADDR_OF_8KB_BLOCK(err_addr) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(err_addr); + + /* we only save ue error information currently, ce is skipped */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) + == 1) { + err_rec->address = err_addr; + /* page frame address is saved */ + err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + err_rec->ts = (uint64_t)ktime_get_real_seconds(); + err_rec->err_type = 
AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec->cu = 0; + err_rec->mem_channel = channel_index; + err_rec->mcumc_id = umc_v6_1_get_umc_inst(adev); + + err_data->err_addr_cnt++; + } } /* clear umc status */ @@ -209,7 +234,7 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev, amdgpu_umc_for_each_channel(umc_v6_1_query_error_address); } -static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev, +static void umc_v6_1_err_cnt_init_per_channel(struct amdgpu_device *adev, struct ras_err_data *err_data, uint32_t umc_reg_offset, uint32_t channel_index) { @@ -239,15 +264,16 @@ static void umc_v6_1_ras_init_per_channel(struct amdgpu_device *adev, WREG32(ecc_err_cnt_addr + umc_reg_offset, UMC_V6_1_CE_CNT_INIT); } -static void umc_v6_1_ras_init(struct amdgpu_device *adev) +static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev) { void *ras_error_status = NULL; - amdgpu_umc_for_each_channel(umc_v6_1_ras_init_per_channel); + amdgpu_umc_for_each_channel(umc_v6_1_err_cnt_init_per_channel); } const struct amdgpu_umc_funcs umc_v6_1_funcs = { - .ras_init = umc_v6_1_ras_init, + .err_cnt_init = umc_v6_1_err_cnt_init, + .ras_late_init = amdgpu_umc_ras_late_init, .query_ras_error_count = umc_v6_1_query_ras_error_count, .query_ras_error_address = umc_v6_1_query_ras_error_address, .enable_umc_index_mode = umc_v6_1_enable_umc_index_mode, diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c index 93b3500e522b..b4f84a820a44 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c @@ -202,7 +202,6 @@ static int vcn_v1_0_hw_init(void *handle) for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst->ring_enc[i]; - ring->sched.ready = true; r = amdgpu_ring_test_helper(ring); if (r) goto done; diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c index 36ad0c0e8efb..38f787a560cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c @@ -244,33 +244,24 @@ static int vcn_v2_0_hw_init(void *handle) struct amdgpu_ring *ring = &adev->vcn.inst->ring_dec; int i, r; - adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, ring->doorbell_index, 0); - ring->sched.ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst->ring_enc[i]; - ring->sched.ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } } ring = &adev->vcn.inst->ring_jpeg; - ring->sched.ready = true; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } done: if (!r) diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c index 395c2259f979..93edf9193a7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c @@ -25,6 +25,7 @@ #include "amdgpu.h" #include "amdgpu_vcn.h" +#include "amdgpu_pm.h" #include "soc15.h" #include "soc15d.h" #include "vcn_v2_0.h" @@ -255,32 +256,24 @@ static int vcn_v2_5_hw_init(void *handle) continue; ring = &adev->vcn.inst[j].ring_dec; - adev->nbio_funcs->vcn_doorbell_range(adev, ring->use_doorbell, + adev->nbio.funcs->vcn_doorbell_range(adev, 
ring->use_doorbell, ring->doorbell_index, j); - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } for (i = 0; i < adev->vcn.num_enc_rings; ++i) { ring = &adev->vcn.inst[j].ring_enc[i]; - ring->sched.ready = false; - continue; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } } ring = &adev->vcn.inst[j].ring_jpeg; - r = amdgpu_ring_test_ring(ring); - if (r) { - ring->sched.ready = false; + r = amdgpu_ring_test_helper(ring); + if (r) goto done; - } } done: if (!r) @@ -300,7 +293,7 @@ static int vcn_v2_5_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ring *ring; - int i; + int i, j; for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) @@ -312,8 +305,8 @@ static int vcn_v2_5_hw_fini(void *handle) ring->sched.ready = false; - for (i = 0; i < adev->vcn.num_enc_rings; ++i) { - ring = &adev->vcn.inst[i].ring_enc[i]; + for (j = 0; j < adev->vcn.num_enc_rings; ++j) { + ring = &adev->vcn.inst[i].ring_enc[j]; ring->sched.ready = false; } @@ -423,7 +416,6 @@ static void vcn_v2_5_mc_resume(struct amdgpu_device *adev) * vcn_v2_5_disable_clock_gating - disable VCN clock gating * * @adev: amdgpu_device pointer - * @sw: enable SW clock gating * * Disable clock gating for VCN block */ @@ -542,7 +534,6 @@ static void vcn_v2_5_disable_clock_gating(struct amdgpu_device *adev) * vcn_v2_5_enable_clock_gating - enable VCN clock gating * * @adev: amdgpu_device pointer - * @sw: enable SW clock gating * * Enable clock gating for VCN block */ @@ -716,6 +707,9 @@ static int vcn_v2_5_start(struct amdgpu_device *adev) uint32_t rb_bufsz, tmp; int i, j, k, r; + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, true); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { if (adev->vcn.harvest_config & (1 << i)) continue; @@ -946,6 +940,9 @@ static int vcn_v2_5_stop(struct amdgpu_device *adev) ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); } + if (adev->pm.dpm_enabled) + amdgpu_dpm_enable_uvd(adev, false); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c index 9eae3536ddad..5cb7e231de5f 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c @@ -226,7 +226,7 @@ static int vega10_ih_irq_init(struct amdgpu_device *adev) /* disable irqs */ vega10_ih_disable_interrupts(adev); - adev->nbio_funcs->ih_control(adev); + adev->nbio.funcs->ih_control(adev); ih = &adev->irq.ih; /* Ring Buffer base. [39:8] of 40-bit address of the beginning of the ring buffer*/ @@ -675,10 +675,49 @@ static int vega10_ih_soft_reset(void *handle) return 0; } +static void vega10_ih_update_clockgating_state(struct amdgpu_device *adev, + bool enable) +{ + uint32_t data, def, field_val; + + if (adev->cg_flags & AMD_CG_SUPPORT_IH_CG) { + def = data = RREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL); + field_val = enable ? 0 : 1; + /** + * Vega10 does not have IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE + * and IH_BUFFER_MEM_CLK_SOFT_OVERRIDE field. 
+ */ + if (adev->asic_type > CHIP_VEGA10) { + data = REG_SET_FIELD(data, IH_CLK_CTRL, + IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + IH_BUFFER_MEM_CLK_SOFT_OVERRIDE, field_val); + } + + data = REG_SET_FIELD(data, IH_CLK_CTRL, + DBUS_MUX_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + OSSSYS_SHARE_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + LIMIT_SMN_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + DYN_CLK_SOFT_OVERRIDE, field_val); + data = REG_SET_FIELD(data, IH_CLK_CTRL, + REG_CLK_SOFT_OVERRIDE, field_val); + if (def != data) + WREG32_SOC15(OSSSYS, 0, mmIH_CLK_CTRL, data); + } +} + static int vega10_ih_set_clockgating_state(void *handle, enum amd_clockgating_state state) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + vega10_ih_update_clockgating_state(adev, + state == AMD_CG_STATE_GATE ? true : false); return 0; + } static int vega10_ih_set_powergating_state(void *handle, diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c index bd0580334f83..6b52a539d51b 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega10_reg_init.c @@ -24,7 +24,6 @@ #include "soc15.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "vega10_ip_offset.h" int vega10_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c index 587e33f5dcce..556f854e3551 100644 --- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c +++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c @@ -24,7 +24,6 @@ #include "soc15.h" #include "soc15_common.h" -#include "soc15_hw_ip.h" #include "vega20_ip_offset.h" int vega20_reg_base_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 5f8c8786cac5..78e5cdc0c058 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -689,16 +689,50 @@ static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) return -EINVAL; } +int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap) +{ + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs || !pp_funcs->get_asic_baco_capability) { + *cap = false; + return -ENOENT; + } + + return pp_funcs->get_asic_baco_capability(pp_handle, cap); +} + +int smu7_asic_baco_reset(struct amdgpu_device *adev) +{ + void *pp_handle = adev->powerplay.pp_handle; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs ||!pp_funcs->get_asic_baco_state ||!pp_funcs->set_asic_baco_state) + return -ENOENT; + + /* enter BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 1)) + return -EIO; + + /* exit BACO state */ + if (pp_funcs->set_asic_baco_state(pp_handle, 0)) + return -EIO; + + dev_info(adev->dev, "GPU BACO reset\n"); + + return 0; +} + /** - * vi_asic_reset - soft reset GPU + * vi_asic_pci_config_reset - soft reset GPU * * @adev: amdgpu_device pointer * - * Look up which blocks are hung and attempt - * to reset them. + * Use PCI Config method to reset the GPU. + * * Returns 0 for success. 
*/ -static int vi_asic_reset(struct amdgpu_device *adev) +static int vi_asic_pci_config_reset(struct amdgpu_device *adev) { int r; @@ -714,7 +748,47 @@ static int vi_asic_reset(struct amdgpu_device *adev) static enum amd_reset_method vi_asic_reset_method(struct amdgpu_device *adev) { - return AMD_RESET_METHOD_LEGACY; + bool baco_reset; + + switch (adev->asic_type) { + case CHIP_FIJI: + case CHIP_TONGA: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_TOPAZ: + smu7_asic_get_baco_capability(adev, &baco_reset); + break; + default: + baco_reset = false; + break; + } + + if (baco_reset) + return AMD_RESET_METHOD_BACO; + else + return AMD_RESET_METHOD_LEGACY; +} + +/** + * vi_asic_reset - soft reset GPU + * + * @adev: amdgpu_device pointer + * + * Look up which blocks are hung and attempt + * to reset them. + * Returns 0 for success. + */ +static int vi_asic_reset(struct amdgpu_device *adev) +{ + int r; + + if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) + r = smu7_asic_baco_reset(adev); + else + r = vi_asic_pci_config_reset(adev); + + return r; } static u32 vi_get_config_memsize(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h index 8de0772f986c..40d4174913a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.h +++ b/drivers/gpu/drm/amd/amdgpu/vi.h @@ -31,4 +31,7 @@ void vi_srbm_select(struct amdgpu_device *adev, int vi_set_ip_blocks(struct amdgpu_device *adev); void legacy_doorbell_index_init(struct amdgpu_device *adev); +int smu7_asic_get_baco_capability(struct amdgpu_device *adev, bool *cap); +int smu7_asic_baco_reset(struct amdgpu_device *adev); + #endif diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c index 177d1e5329a5..9f59ba93cfe0 100644 --- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c @@ -33,7 +33,9 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, const struct cik_ih_ring_entry *ihre = (const struct cik_ih_ring_entry *)ih_ring_entry; const struct kfd2kgd_calls *f2g = dev->kfd2kgd; - unsigned int vmid, pasid; + unsigned int vmid; + uint16_t pasid; + bool ret; /* This workaround is due to HW/FW limitation on Hawaii that * VMID and PASID are not written into ih_ring_entry @@ -48,13 +50,13 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev, *tmp_ihre = *ihre; vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd); - pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid); + ret = f2g->get_atc_vmid_pasid_mapping_info(dev->kgd, vmid, &pasid); tmp_ihre->ring_id &= 0x000000ff; tmp_ihre->ring_id |= vmid << 8; tmp_ihre->ring_id |= pasid << 16; - return (pasid != 0) && + return ret && (pasid != 0) && vmid >= dev->vm_info.first_vmid_kfd && vmid <= dev->vm_info.last_vmid_kfd; } diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index 901fe3590165..d3400da6ab64 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -905,7 +905,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x7a5d0000, 0x807c817c, 0x807aff7a, 0x00000080, 0xbf0a717c, 0xbf85fff8, - 0xbf820141, 0xbef4037e, + 0xbf820142, 0xbef4037e, 0x8775ff7f, 0x0000ffff, 0x8875ff75, 0x00040000, 0xbef60380, 0xbef703ff, @@ -967,7 +967,7 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x725d0000, 0xe0304080, 0x725d0100, 0xe0304100, 0x725d0200, 0xe0304180, - 0x725d0300, 0xbf820031, + 0x725d0300, 
0xbf820032, 0xbef603ff, 0x01000000, 0xbef20378, 0x8078ff78, 0x00000400, 0xbefc0384, @@ -992,83 +992,84 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0x725d0000, 0xe0304100, 0x725d0100, 0xe0304200, 0x725d0200, 0xe0304300, - 0x725d0300, 0xb9782a05, - 0x80788178, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb9721e06, 0x8f728a72, - 0x80787278, 0x8078ff78, - 0x00000200, 0x80f8ff78, - 0x00000050, 0xbef603ff, - 0x01000000, 0xbefc03ff, - 0x0000006c, 0x80f89078, - 0xf429003a, 0xf0000000, - 0xbf8cc07f, 0x80fc847c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0x80f8a078, - 0xf42d003a, 0xf0000000, - 0xbf8cc07f, 0x80fc887c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0x80f8c078, - 0xf431003a, 0xf0000000, - 0xbf8cc07f, 0x80fc907c, - 0xbf800000, 0xbe803100, - 0xbe823102, 0xbe843104, - 0xbe863106, 0xbe883108, - 0xbe8a310a, 0xbe8c310c, - 0xbe8e310e, 0xbf06807c, - 0xbf84fff0, 0xb9782a05, - 0x80788178, 0x907c9973, - 0x877c817c, 0xbf06817c, - 0xbf850002, 0x8f788978, - 0xbf820001, 0x8f788a78, - 0xb9721e06, 0x8f728a72, - 0x80787278, 0x8078ff78, - 0x00000200, 0xbef603ff, - 0x01000000, 0xf4211bfa, + 0x725d0300, 0xbf8c3f70, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, + 0x80f8ff78, 0x00000050, + 0xbef603ff, 0x01000000, + 0xbefc03ff, 0x0000006c, + 0x80f89078, 0xf429003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc847c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0x80f8a078, 0xf42d003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc887c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0x80f8c078, 0xf431003a, + 0xf0000000, 0xbf8cc07f, + 0x80fc907c, 0xbf800000, + 0xbe803100, 0xbe823102, + 0xbe843104, 0xbe863106, + 0xbe883108, 0xbe8a310a, + 0xbe8c310c, 0xbe8e310e, + 0xbf06807c, 0xbf84fff0, + 0xb9782a05, 0x80788178, + 0x907c9973, 0x877c817c, + 0xbf06817c, 0xbf850002, + 0x8f788978, 0xbf820001, + 0x8f788a78, 0xb9721e06, + 0x8f728a72, 0x80787278, + 0x8078ff78, 0x00000200, + 0xbef603ff, 0x01000000, + 0xf4211bfa, 0xf0000000, + 0x80788478, 0xf4211b3a, 0xf0000000, 0x80788478, - 0xf4211b3a, 0xf0000000, - 0x80788478, 0xf4211b7a, + 0xf4211b7a, 0xf0000000, + 0x80788478, 0xf4211eba, 0xf0000000, 0x80788478, - 0xf4211eba, 0xf0000000, - 0x80788478, 0xf4211efa, + 0xf4211efa, 0xf0000000, + 0x80788478, 0xf4211c3a, 0xf0000000, 0x80788478, - 0xf4211c3a, 0xf0000000, - 0x80788478, 0xf4211c7a, + 0xf4211c7a, 0xf0000000, + 0x80788478, 0xf4211e7a, 0xf0000000, 0x80788478, - 0xf4211e7a, 0xf0000000, - 0x80788478, 0xf4211cfa, + 0xf4211cfa, 0xf0000000, + 0x80788478, 0xf4211bba, 0xf0000000, 0x80788478, + 0xbf8cc07f, 0xb9eef814, 0xf4211bba, 0xf0000000, 0x80788478, 0xbf8cc07f, - 0xb9eef814, 0xf4211bba, - 0xf0000000, 0x80788478, - 0xbf8cc07f, 0xb9eef815, - 0xbef2036d, 0x876dff72, - 0x0000ffff, 0xbefc036f, - 0xbefe037a, 0xbeff037b, - 0x876f71ff, 0x000003ff, - 0xb9ef4803, 0xb9f9f816, - 0x876f71ff, 0xfffff800, - 0x906f8b6f, 0xb9efa2c3, - 0xb9f3f801, 0x876fff72, - 0xfc000000, 0x906f9a6f, - 0x8f6f906f, 0xbef30380, + 0xb9eef815, 0xbef2036d, + 0x876dff72, 0x0000ffff, + 0xbefc036f, 0xbefe037a, + 0xbeff037b, 0x876f71ff, + 0x000003ff, 0xb9ef4803, + 0xb9f9f816, 0x876f71ff, + 0xfffff800, 0x906f8b6f, + 0xb9efa2c3, 0xb9f3f801, + 0x876fff72, 0xfc000000, + 0x906f9a6f, 0x8f6f906f, + 0xbef30380, 0x88736f73, + 0x876fff72, 0x02000000, + 0x906f996f, 0x8f6f8f6f, 0x88736f73, 0x876fff72, - 0x02000000, 0x906f996f, - 0x8f6f8f6f, 0x88736f73, - 0x876fff72, 0x01000000, - 0x906f986f, 
0x8f6f996f, - 0x88736f73, 0x876fff70, - 0x00800000, 0x906f976f, - 0xb9f3f807, 0x87fe7e7e, - 0x87ea6a6a, 0xb9f0f802, - 0xbf8a0000, 0xbe80226c, - 0xbf810000, 0xbf9f0000, + 0x01000000, 0x906f986f, + 0x8f6f996f, 0x88736f73, + 0x876fff70, 0x00800000, + 0x906f976f, 0xb9f3f807, + 0x87fe7e7e, 0x87ea6a6a, + 0xb9f0f802, 0xbf8a0000, + 0xbe80226c, 0xbf810000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_arcturus_hex[] = { 0xbf820001, 0xbf8202c4, diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index cdaa523ce6be..4433bda2ce25 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -758,6 +758,7 @@ L_RESTORE_V0: buffer_load_dword v1, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256 buffer_load_dword v2, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*2 buffer_load_dword v3, v0, s_restore_buf_rsrc0, s_restore_mem_offset_save slc:1 glc:1 offset:256*3 + s_waitcnt vmcnt(0) /* restore SGPRs */ //will be 2+8+16*6 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 1d3cd5c50d5f..9af45d07515b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -282,7 +282,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, goto err_bind_process; } - pr_debug("Creating queue for PASID %d on gpu 0x%x\n", + pr_debug("Creating queue for PASID 0x%x on gpu 0x%x\n", p->pasid, dev->id); @@ -332,7 +332,7 @@ static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p, int retval; struct kfd_ioctl_destroy_queue_args *args = data; - pr_debug("Destroying queue id %d for pasid %d\n", + pr_debug("Destroying queue id %d for pasid 0x%x\n", args->queue_id, p->pasid); @@ -378,7 +378,7 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, properties.queue_percent = args->queue_percentage; properties.priority = args->queue_priority; - pr_debug("Updating queue id %d for pasid %d\n", + pr_debug("Updating queue id %d for pasid 0x%x\n", args->queue_id, p->pasid); mutex_lock(&p->mutex); @@ -855,7 +855,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, struct kfd_process_device_apertures *pAperture; struct kfd_process_device *pdd; - dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); + dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid); args->num_of_nodes = 0; @@ -913,7 +913,7 @@ static int kfd_ioctl_get_process_apertures_new(struct file *filp, uint32_t nodes = 0; int ret; - dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); + dev_dbg(kfd_device, "get apertures for PASID 0x%x", p->pasid); if (args->num_of_nodes == 0) { /* Return number of nodes, so that user space can alloacate @@ -1128,7 +1128,7 @@ static int kfd_ioctl_set_scratch_backing_va(struct file *filep, mutex_unlock(&p->mutex); if (dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS && - pdd->qpd.vmid != 0) + pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va) dev->kfd2kgd->set_scratch_backing_va( dev->kgd, args->va_addr, pdd->qpd.vmid); @@ -1801,7 +1801,7 @@ static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) } else goto err_i1; - dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg); + dev_dbg(kfd_device, "ioctl cmd 0x%x (#0x%x), arg 0x%lx\n", cmd, nr, arg); process 
= kfd_get_process(current); if (IS_ERR(process)) { @@ -1856,7 +1856,8 @@ err_i1: kfree(kdata); if (retcode) - dev_dbg(kfd_device, "ret = %d\n", retcode); + dev_dbg(kfd_device, "ioctl cmd (#0x%x), arg 0x%lx, ret = %d\n", + nr, arg, retcode); return retcode; } @@ -1877,7 +1878,7 @@ static int kfd_mmio_mmap(struct kfd_dev *dev, struct kfd_process *process, vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - pr_debug("Process %d mapping mmio page\n" + pr_debug("pasid 0x%x mapping mmio page\n" " target user address == 0x%08llX\n" " physical address == 0x%08llX\n" " vm_flags == 0x%04lX\n" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 66387caf966e..de9f68d5c312 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -138,6 +138,7 @@ static struct kfd_gpu_cache_info carrizo_cache_info[] = { /* TODO - check & update Vega10 cache details */ #define vega10_cache_info carrizo_cache_info #define raven_cache_info carrizo_cache_info +#define renoir_cache_info carrizo_cache_info /* TODO - check & update Navi10 cache details */ #define navi10_cache_info carrizo_cache_info @@ -670,7 +671,13 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, pcache_info = raven_cache_info; num_of_cache_types = ARRAY_SIZE(raven_cache_info); break; + case CHIP_RENOIR: + pcache_info = renoir_cache_info; + num_of_cache_types = ARRAY_SIZE(renoir_cache_info); + break; case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: pcache_info = navi10_cache_info; num_of_cache_types = ARRAY_SIZE(navi10_cache_info); break; @@ -703,7 +710,7 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, pcache_info, cu_info, mem_available, - cu_info->cu_bitmap[i][j], + cu_info->cu_bitmap[i % 4][j + i / 4], ct, cu_processor_id, k); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c index a3441b0e385b..d59f2cd056c6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c @@ -761,6 +761,7 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) { int status = 0; unsigned int vmid; + uint16_t queried_pasid; union SQ_CMD_BITS reg_sq_cmd; union GRBM_GFX_INDEX_BITS reg_gfx_index; struct kfd_process_device *pdd; @@ -782,19 +783,18 @@ int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) */ for (vmid = first_vmid_to_scan; vmid <= last_vmid_to_scan; vmid++) { - if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_valid - (dev->kgd, vmid)) { - if (dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid - (dev->kgd, vmid) == p->pasid) { - pr_debug("Killing wave fronts of vmid %d and pasid %d\n", - vmid, p->pasid); - break; - } + status = dev->kfd2kgd->get_atc_vmid_pasid_mapping_info + (dev->kgd, vmid, &queried_pasid); + + if (status && queried_pasid == p->pasid) { + pr_debug("Killing wave fronts of vmid %d and pasid 0x%x\n", + vmid, p->pasid); + break; } } if (vmid > last_vmid_to_scan) { - pr_err("Didn't find vmid for pasid %d\n", p->pasid); + pr_err("Didn't find vmid for pasid 0x%x\n", p->pasid); return -EFAULT; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c index 9d4af961c5d1..9bfa50633654 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgmgr.c @@ -96,7 +96,7 @@ bool kfd_dbgmgr_create(struct kfd_dbgmgr **ppmgr, struct kfd_dev *pdev) long kfd_dbgmgr_register(struct kfd_dbgmgr *pmgr, struct kfd_process *p) { if (pmgr->pasid != 0) { - 
pr_debug("H/W debugger is already active using pasid %d\n", + pr_debug("H/W debugger is already active using pasid 0x%x\n", pmgr->pasid); return -EBUSY; } @@ -117,7 +117,7 @@ long kfd_dbgmgr_unregister(struct kfd_dbgmgr *pmgr, struct kfd_process *p) { /* Is the requests coming from the already registered process? */ if (pmgr->pasid != p->pasid) { - pr_debug("H/W debugger is not registered by calling pasid %d\n", + pr_debug("H/W debugger is not registered by calling pasid 0x%x\n", p->pasid); return -EINVAL; } @@ -134,7 +134,7 @@ long kfd_dbgmgr_wave_control(struct kfd_dbgmgr *pmgr, { /* Is the requests coming from the already registered process? */ if (pmgr->pasid != wac_info->process->pasid) { - pr_debug("H/W debugger support was not registered for requester pasid %d\n", + pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n", wac_info->process->pasid); return -EINVAL; } @@ -147,7 +147,7 @@ long kfd_dbgmgr_address_watch(struct kfd_dbgmgr *pmgr, { /* Is the requests coming from the already registered process? */ if (pmgr->pasid != adw_info->process->pasid) { - pr_debug("H/W debugger support was not registered for requester pasid %d\n", + pr_debug("H/W debugger support was not registered for requester pasid 0x%x\n", adw_info->process->pasid); return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 0dc1084b5e82..4fa8834ce7cb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -39,6 +39,41 @@ */ static atomic_t kfd_locked = ATOMIC_INIT(0); +#ifdef CONFIG_DRM_AMDGPU_CIK +extern const struct kfd2kgd_calls gfx_v7_kfd2kgd; +#endif +extern const struct kfd2kgd_calls gfx_v8_kfd2kgd; +extern const struct kfd2kgd_calls gfx_v9_kfd2kgd; +extern const struct kfd2kgd_calls arcturus_kfd2kgd; +extern const struct kfd2kgd_calls gfx_v10_kfd2kgd; + +static const struct kfd2kgd_calls *kfd2kgd_funcs[] = { +#ifdef KFD_SUPPORT_IOMMU_V2 +#ifdef CONFIG_DRM_AMDGPU_CIK + [CHIP_KAVERI] = &gfx_v7_kfd2kgd, +#endif + [CHIP_CARRIZO] = &gfx_v8_kfd2kgd, + [CHIP_RAVEN] = &gfx_v9_kfd2kgd, +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + [CHIP_HAWAII] = &gfx_v7_kfd2kgd, +#endif + [CHIP_TONGA] = &gfx_v8_kfd2kgd, + [CHIP_FIJI] = &gfx_v8_kfd2kgd, + [CHIP_POLARIS10] = &gfx_v8_kfd2kgd, + [CHIP_POLARIS11] = &gfx_v8_kfd2kgd, + [CHIP_POLARIS12] = &gfx_v8_kfd2kgd, + [CHIP_VEGAM] = &gfx_v8_kfd2kgd, + [CHIP_VEGA10] = &gfx_v9_kfd2kgd, + [CHIP_VEGA12] = &gfx_v9_kfd2kgd, + [CHIP_VEGA20] = &gfx_v9_kfd2kgd, + [CHIP_RENOIR] = &gfx_v9_kfd2kgd, + [CHIP_ARCTURUS] = &arcturus_kfd2kgd, + [CHIP_NAVI10] = &gfx_v10_kfd2kgd, + [CHIP_NAVI12] = &gfx_v10_kfd2kgd, + [CHIP_NAVI14] = &gfx_v10_kfd2kgd, +}; + #ifdef KFD_SUPPORT_IOMMU_V2 static const struct kfd_device_info kaveri_device_info = { .asic_family = CHIP_KAVERI, @@ -351,6 +386,24 @@ static const struct kfd_device_info arcturus_device_info = { .num_sdma_queues_per_engine = 8, }; +static const struct kfd_device_info renoir_device_info = { + .asic_family = CHIP_RENOIR, + .asic_name = "renoir", + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .supports_cwsr = true, + .needs_iommu_device = false, + .needs_pci_atomics = false, + .num_sdma_engines = 1, + .num_xgmi_sdma_engines = 0, + .num_sdma_queues_per_engine = 2, +}; + static const struct kfd_device_info navi10_device_info = { .asic_family = 
CHIP_NAVI10, .asic_name = "navi10", @@ -369,133 +422,64 @@ static const struct kfd_device_info navi10_device_info = { .num_sdma_queues_per_engine = 8, }; -struct kfd_deviceid { - unsigned short did; - const struct kfd_device_info *device_info; +static const struct kfd_device_info navi12_device_info = { + .asic_family = CHIP_NAVI12, + .asic_name = "navi12", + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .needs_iommu_device = false, + .supports_cwsr = true, + .needs_pci_atomics = false, + .num_sdma_engines = 2, + .num_xgmi_sdma_engines = 0, + .num_sdma_queues_per_engine = 8, +}; + +static const struct kfd_device_info navi14_device_info = { + .asic_family = CHIP_NAVI14, + .asic_name = "navi14", + .max_pasid_bits = 16, + .max_no_of_hqd = 24, + .doorbell_size = 8, + .ih_ring_entry_size = 8 * sizeof(uint32_t), + .event_interrupt_class = &event_interrupt_class_v9, + .num_of_watch_points = 4, + .mqd_size_aligned = MQD_SIZE_ALIGNED, + .needs_iommu_device = false, + .supports_cwsr = true, + .needs_pci_atomics = false, + .num_sdma_engines = 2, + .num_xgmi_sdma_engines = 0, + .num_sdma_queues_per_engine = 8, }; -static const struct kfd_deviceid supported_devices[] = { +/* For each entry, [0] is regular and [1] is virtualisation device. */ +static const struct kfd_device_info *kfd_supported_devices[][2] = { #ifdef KFD_SUPPORT_IOMMU_V2 - { 0x1304, &kaveri_device_info }, /* Kaveri */ - { 0x1305, &kaveri_device_info }, /* Kaveri */ - { 0x1306, &kaveri_device_info }, /* Kaveri */ - { 0x1307, &kaveri_device_info }, /* Kaveri */ - { 0x1309, &kaveri_device_info }, /* Kaveri */ - { 0x130A, &kaveri_device_info }, /* Kaveri */ - { 0x130B, &kaveri_device_info }, /* Kaveri */ - { 0x130C, &kaveri_device_info }, /* Kaveri */ - { 0x130D, &kaveri_device_info }, /* Kaveri */ - { 0x130E, &kaveri_device_info }, /* Kaveri */ - { 0x130F, &kaveri_device_info }, /* Kaveri */ - { 0x1310, &kaveri_device_info }, /* Kaveri */ - { 0x1311, &kaveri_device_info }, /* Kaveri */ - { 0x1312, &kaveri_device_info }, /* Kaveri */ - { 0x1313, &kaveri_device_info }, /* Kaveri */ - { 0x1315, &kaveri_device_info }, /* Kaveri */ - { 0x1316, &kaveri_device_info }, /* Kaveri */ - { 0x1317, &kaveri_device_info }, /* Kaveri */ - { 0x1318, &kaveri_device_info }, /* Kaveri */ - { 0x131B, &kaveri_device_info }, /* Kaveri */ - { 0x131C, &kaveri_device_info }, /* Kaveri */ - { 0x131D, &kaveri_device_info }, /* Kaveri */ - { 0x9870, &carrizo_device_info }, /* Carrizo */ - { 0x9874, &carrizo_device_info }, /* Carrizo */ - { 0x9875, &carrizo_device_info }, /* Carrizo */ - { 0x9876, &carrizo_device_info }, /* Carrizo */ - { 0x9877, &carrizo_device_info }, /* Carrizo */ - { 0x15DD, &raven_device_info }, /* Raven */ - { 0x15D8, &raven_device_info }, /* Raven */ + [CHIP_KAVERI] = {&kaveri_device_info, NULL}, + [CHIP_CARRIZO] = {&carrizo_device_info, NULL}, + [CHIP_RAVEN] = {&raven_device_info, NULL}, #endif - { 0x67A0, &hawaii_device_info }, /* Hawaii */ - { 0x67A1, &hawaii_device_info }, /* Hawaii */ - { 0x67A2, &hawaii_device_info }, /* Hawaii */ - { 0x67A8, &hawaii_device_info }, /* Hawaii */ - { 0x67A9, &hawaii_device_info }, /* Hawaii */ - { 0x67AA, &hawaii_device_info }, /* Hawaii */ - { 0x67B0, &hawaii_device_info }, /* Hawaii */ - { 0x67B1, &hawaii_device_info }, /* Hawaii */ - { 0x67B8, &hawaii_device_info }, /* Hawaii */ - { 0x67B9, &hawaii_device_info }, /* 
Hawaii */ - { 0x67BA, &hawaii_device_info }, /* Hawaii */ - { 0x67BE, &hawaii_device_info }, /* Hawaii */ - { 0x6920, &tonga_device_info }, /* Tonga */ - { 0x6921, &tonga_device_info }, /* Tonga */ - { 0x6928, &tonga_device_info }, /* Tonga */ - { 0x6929, &tonga_device_info }, /* Tonga */ - { 0x692B, &tonga_device_info }, /* Tonga */ - { 0x6938, &tonga_device_info }, /* Tonga */ - { 0x6939, &tonga_device_info }, /* Tonga */ - { 0x7300, &fiji_device_info }, /* Fiji */ - { 0x730F, &fiji_vf_device_info }, /* Fiji vf*/ - { 0x67C0, &polaris10_device_info }, /* Polaris10 */ - { 0x67C1, &polaris10_device_info }, /* Polaris10 */ - { 0x67C2, &polaris10_device_info }, /* Polaris10 */ - { 0x67C4, &polaris10_device_info }, /* Polaris10 */ - { 0x67C7, &polaris10_device_info }, /* Polaris10 */ - { 0x67C8, &polaris10_device_info }, /* Polaris10 */ - { 0x67C9, &polaris10_device_info }, /* Polaris10 */ - { 0x67CA, &polaris10_device_info }, /* Polaris10 */ - { 0x67CC, &polaris10_device_info }, /* Polaris10 */ - { 0x67CF, &polaris10_device_info }, /* Polaris10 */ - { 0x67D0, &polaris10_vf_device_info }, /* Polaris10 vf*/ - { 0x67DF, &polaris10_device_info }, /* Polaris10 */ - { 0x6FDF, &polaris10_device_info }, /* Polaris10 */ - { 0x67E0, &polaris11_device_info }, /* Polaris11 */ - { 0x67E1, &polaris11_device_info }, /* Polaris11 */ - { 0x67E3, &polaris11_device_info }, /* Polaris11 */ - { 0x67E7, &polaris11_device_info }, /* Polaris11 */ - { 0x67E8, &polaris11_device_info }, /* Polaris11 */ - { 0x67E9, &polaris11_device_info }, /* Polaris11 */ - { 0x67EB, &polaris11_device_info }, /* Polaris11 */ - { 0x67EF, &polaris11_device_info }, /* Polaris11 */ - { 0x67FF, &polaris11_device_info }, /* Polaris11 */ - { 0x6980, &polaris12_device_info }, /* Polaris12 */ - { 0x6981, &polaris12_device_info }, /* Polaris12 */ - { 0x6985, &polaris12_device_info }, /* Polaris12 */ - { 0x6986, &polaris12_device_info }, /* Polaris12 */ - { 0x6987, &polaris12_device_info }, /* Polaris12 */ - { 0x6995, &polaris12_device_info }, /* Polaris12 */ - { 0x6997, &polaris12_device_info }, /* Polaris12 */ - { 0x699F, &polaris12_device_info }, /* Polaris12 */ - { 0x694C, &vegam_device_info }, /* VegaM */ - { 0x694E, &vegam_device_info }, /* VegaM */ - { 0x694F, &vegam_device_info }, /* VegaM */ - { 0x6860, &vega10_device_info }, /* Vega10 */ - { 0x6861, &vega10_device_info }, /* Vega10 */ - { 0x6862, &vega10_device_info }, /* Vega10 */ - { 0x6863, &vega10_device_info }, /* Vega10 */ - { 0x6864, &vega10_device_info }, /* Vega10 */ - { 0x6867, &vega10_device_info }, /* Vega10 */ - { 0x6868, &vega10_device_info }, /* Vega10 */ - { 0x6869, &vega10_device_info }, /* Vega10 */ - { 0x686A, &vega10_device_info }, /* Vega10 */ - { 0x686B, &vega10_device_info }, /* Vega10 */ - { 0x686C, &vega10_vf_device_info }, /* Vega10 vf*/ - { 0x686D, &vega10_device_info }, /* Vega10 */ - { 0x686E, &vega10_device_info }, /* Vega10 */ - { 0x686F, &vega10_device_info }, /* Vega10 */ - { 0x687F, &vega10_device_info }, /* Vega10 */ - { 0x69A0, &vega12_device_info }, /* Vega12 */ - { 0x69A1, &vega12_device_info }, /* Vega12 */ - { 0x69A2, &vega12_device_info }, /* Vega12 */ - { 0x69A3, &vega12_device_info }, /* Vega12 */ - { 0x69AF, &vega12_device_info }, /* Vega12 */ - { 0x66a0, &vega20_device_info }, /* Vega20 */ - { 0x66a1, &vega20_device_info }, /* Vega20 */ - { 0x66a2, &vega20_device_info }, /* Vega20 */ - { 0x66a3, &vega20_device_info }, /* Vega20 */ - { 0x66a4, &vega20_device_info }, /* Vega20 */ - { 0x66a7, &vega20_device_info }, /* Vega20 */ - { 0x66af, 
&vega20_device_info }, /* Vega20 */ - { 0x738C, &arcturus_device_info }, /* Arcturus */ - { 0x7388, &arcturus_device_info }, /* Arcturus */ - { 0x738E, &arcturus_device_info }, /* Arcturus */ - { 0x7390, &arcturus_device_info }, /* Arcturus vf */ - { 0x7310, &navi10_device_info }, /* Navi10 */ - { 0x7312, &navi10_device_info }, /* Navi10 */ - { 0x7318, &navi10_device_info }, /* Navi10 */ - { 0x731a, &navi10_device_info }, /* Navi10 */ - { 0x731f, &navi10_device_info }, /* Navi10 */ + [CHIP_HAWAII] = {&hawaii_device_info, NULL}, + [CHIP_TONGA] = {&tonga_device_info, NULL}, + [CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info}, + [CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info}, + [CHIP_POLARIS11] = {&polaris11_device_info, NULL}, + [CHIP_POLARIS12] = {&polaris12_device_info, NULL}, + [CHIP_VEGAM] = {&vegam_device_info, NULL}, + [CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info}, + [CHIP_VEGA12] = {&vega12_device_info, NULL}, + [CHIP_VEGA20] = {&vega20_device_info, NULL}, + [CHIP_RENOIR] = {&renoir_device_info, NULL}, + [CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info}, + [CHIP_NAVI10] = {&navi10_device_info, NULL}, + [CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info}, + [CHIP_NAVI14] = {&navi14_device_info, NULL}, }; static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, @@ -504,32 +488,25 @@ static void kfd_gtt_sa_fini(struct kfd_dev *kfd); static int kfd_resume(struct kfd_dev *kfd); -static const struct kfd_device_info *lookup_device_info(unsigned short did) +struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, + struct pci_dev *pdev, unsigned int asic_type, bool vf) { - size_t i; + struct kfd_dev *kfd; + const struct kfd_device_info *device_info; + const struct kfd2kgd_calls *f2g; - for (i = 0; i < ARRAY_SIZE(supported_devices); i++) { - if (supported_devices[i].did == did) { - WARN_ON(!supported_devices[i].device_info); - return supported_devices[i].device_info; - } + if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2) + || asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) { + dev_err(kfd_device, "asic_type %d out of range\n", asic_type); + return NULL; /* asic_type out of range */ } - dev_warn(kfd_device, "DID %04x is missing in supported_devices\n", - did); + device_info = kfd_supported_devices[asic_type][vf]; + f2g = kfd2kgd_funcs[asic_type]; - return NULL; -} - -struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd, - struct pci_dev *pdev, const struct kfd2kgd_calls *f2g) -{ - struct kfd_dev *kfd; - const struct kfd_device_info *device_info = - lookup_device_info(pdev->device); - - if (!device_info) { - dev_err(kfd_device, "kgd2kfd_probe failed\n"); + if (!device_info || !f2g) { + dev_err(kfd_device, "%s %s not supported in kfd\n", + amdgpu_asic_name[asic_type], vf ? 
"VF" : ""); return NULL; } @@ -593,10 +570,12 @@ static void kfd_cwsr_init(struct kfd_dev *kfd) } bool kgd2kfd_device_init(struct kfd_dev *kfd, + struct drm_device *ddev, const struct kgd2kfd_shared_resources *gpu_resources) { unsigned int size; + kfd->ddev = ddev; kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, KGD_ENGINE_MEC1); kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd, @@ -751,9 +730,6 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) return 0; kgd2kfd_suspend(kfd); - /* hold dqm->lock to prevent further execution*/ - dqm_lock(kfd->dqm); - kfd_signal_reset_event(kfd); return 0; } @@ -771,8 +747,6 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd) if (!kfd->init_complete) return 0; - dqm_unlock(kfd->dqm); - ret = kfd_resume(kfd); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index d985e31fcc1e..984c2f2b24b6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -195,20 +195,30 @@ static int allocate_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) { - int bit, allocated_vmid; + int allocated_vmid = -1, i; - if (dqm->vmid_bitmap == 0) - return -ENOMEM; + for (i = dqm->dev->vm_info.first_vmid_kfd; + i <= dqm->dev->vm_info.last_vmid_kfd; i++) { + if (!dqm->vmid_pasid[i]) { + allocated_vmid = i; + break; + } + } + + if (allocated_vmid < 0) { + pr_err("no more vmid to allocate\n"); + return -ENOSPC; + } + + pr_debug("vmid allocated: %d\n", allocated_vmid); + + dqm->vmid_pasid[allocated_vmid] = q->process->pasid; - bit = ffs(dqm->vmid_bitmap) - 1; - dqm->vmid_bitmap &= ~(1 << bit); + set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid); - allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd; - pr_debug("vmid allocation %d\n", allocated_vmid); qpd->vmid = allocated_vmid; q->properties.vmid = allocated_vmid; - set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid); program_sh_mem_settings(dqm, qpd); /* qpd->page_table_base is set earlier when register_process() @@ -220,8 +230,9 @@ static int allocate_vmid(struct device_queue_manager *dqm, /* invalidate the VM context after pasid and vmid mapping is set up */ kfd_flush_tlb(qpd_to_pdd(qpd)); - dqm->dev->kfd2kgd->set_scratch_backing_va( - dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid); + if (dqm->dev->kfd2kgd->set_scratch_backing_va) + dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd, + qpd->sh_hidden_private_base, qpd->vmid); return 0; } @@ -248,8 +259,6 @@ static void deallocate_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) { - int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd; - /* On GFX v7, CP doesn't flush TC at dequeue */ if (q->device->device_info->asic_family == CHIP_HAWAII) if (flush_texture_cache_nocpsch(q->device, qpd)) @@ -259,8 +268,8 @@ static void deallocate_vmid(struct device_queue_manager *dqm, /* Release the vmid mapping */ set_pasid_vmid_mapping(dqm, 0, qpd->vmid); + dqm->vmid_pasid[qpd->vmid] = 0; - dqm->vmid_bitmap |= (1 << bit); qpd->vmid = 0; q->properties.vmid = 0; } @@ -331,6 +340,10 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr, &q->properties); if (q->properties.is_active) { + if (!dqm->sched_running) { + WARN_ONCE(1, "Load non-HWS mqd while stopped\n"); + goto add_queue_to_list; + } if (WARN(q->process->mm != 
current->mm, "should only run in user thread")) @@ -342,6 +355,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm, goto out_free_mqd; } +add_queue_to_list: list_add(&q->list, &qpd->queues_list); qpd->queue_count++; if (q->properties.is_active) @@ -449,6 +463,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm, deallocate_doorbell(qpd, q); + if (!dqm->sched_running) { + WARN_ONCE(1, "Destroy non-HWS queue while stopped\n"); + return 0; + } + retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, KFD_PREEMPT_TYPE_WAVEFRONT_RESET, KFD_UNMAP_LATENCY_MS, @@ -524,6 +543,12 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) (q->properties.type == KFD_QUEUE_TYPE_COMPUTE || q->properties.type == KFD_QUEUE_TYPE_SDMA || q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) { + + if (!dqm->sched_running) { + WARN_ONCE(1, "Update non-HWS queue while stopped\n"); + goto out_unlock; + } + retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN, KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -579,7 +604,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, goto out; pdd = qpd_to_pdd(qpd); - pr_info_ratelimited("Evicting PASID %u queues\n", + pr_info_ratelimited("Evicting PASID 0x%x queues\n", pdd->process->pasid); /* Mark all queues as evicted. Deactivate all active queues on @@ -593,6 +618,11 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; q->properties.is_active = false; + dqm->queue_count--; + + if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n")) + continue; + retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN, KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -601,7 +631,6 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, * maintain a consistent eviction state */ ret = retval; - dqm->queue_count--; } out: @@ -621,7 +650,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, goto out; pdd = qpd_to_pdd(qpd); - pr_info_ratelimited("Evicting PASID %u queues\n", + pr_info_ratelimited("Evicting PASID 0x%x queues\n", pdd->process->pasid); /* Mark all queues as evicted. 
Deactivate all active queues on @@ -667,7 +696,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, goto out; } - pr_info_ratelimited("Restoring PASID %u queues\n", + pr_info_ratelimited("Restoring PASID 0x%x queues\n", pdd->process->pasid); /* Update PD Base in QPD */ @@ -702,6 +731,11 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type( q->properties.type)]; q->properties.is_active = true; + dqm->queue_count++; + + if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n")) + continue; + retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue, &q->properties, mm); if (retval && !ret) @@ -709,7 +743,6 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm, * maintain a consistent eviction state */ ret = retval; - dqm->queue_count++; } qpd->evicted = 0; out: @@ -739,7 +772,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, goto out; } - pr_info_ratelimited("Restoring PASID %u queues\n", + pr_info_ratelimited("Restoring PASID 0x%x queues\n", pdd->process->pasid); /* Update PD Base in QPD */ @@ -879,7 +912,8 @@ static int initialize_nocpsch(struct device_queue_manager *dqm) dqm->allocated_queues[pipe] |= 1 << queue; } - dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1; + memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid)); + dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm)); dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm)); @@ -902,12 +936,20 @@ static void uninitialize(struct device_queue_manager *dqm) static int start_nocpsch(struct device_queue_manager *dqm) { init_interrupts(dqm); - return pm_init(&dqm->packets, dqm); + + if (dqm->dev->device_info->asic_family == CHIP_HAWAII) + return pm_init(&dqm->packets, dqm); + dqm->sched_running = true; + + return 0; } static int stop_nocpsch(struct device_queue_manager *dqm) { - pm_uninit(&dqm->packets); + if (dqm->dev->device_info->asic_family == CHIP_HAWAII) + pm_uninit(&dqm->packets); + dqm->sched_running = false; + return 0; } @@ -1058,6 +1100,7 @@ static int start_cpsch(struct device_queue_manager *dqm) dqm_lock(dqm); /* clear hang status when driver try to start the hw scheduler */ dqm->is_hws_hang = false; + dqm->sched_running = true; execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); dqm_unlock(dqm); @@ -1073,6 +1116,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) { dqm_lock(dqm); unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); + dqm->sched_running = false; dqm_unlock(dqm); kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); @@ -1259,9 +1303,10 @@ static int map_queues_cpsch(struct device_queue_manager *dqm) { int retval; + if (!dqm->sched_running) + return 0; if (dqm->queue_count <= 0 || dqm->processes_count <= 0) return 0; - if (dqm->active_runlist) return 0; @@ -1283,6 +1328,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, { int retval = 0; + if (!dqm->sched_running) + return 0; if (dqm->is_hws_hang) return -EIO; if (!dqm->active_runlist) @@ -1676,7 +1723,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) struct kfd_dev *dev = dqm->dev; struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * - dev->device_info->num_sdma_engines * + (dev->device_info->num_sdma_engines + + dev->device_info->num_xgmi_sdma_engines) * dev->device_info->num_sdma_queues_per_engine + 
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; @@ -1786,10 +1834,13 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: device_queue_manager_init_v9(&dqm->asic_ops); break; case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: device_queue_manager_init_v10_navi10(&dqm->asic_ops); break; default: @@ -1883,6 +1934,12 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) int pipe, queue; int r = 0; + if (!dqm->sched_running) { + seq_printf(m, " Device is stopped\n"); + + return 0; + } + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd, KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs); @@ -1917,7 +1974,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) } } - for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) { + for (pipe = 0; pipe < get_num_sdma_engines(dqm) + + get_num_xgmi_sdma_engines(dqm); pipe++) { for (queue = 0; queue < dqm->dev->device_info->num_sdma_queues_per_engine; queue++) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 90db2c9275f6..a8c37e6da027 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -32,6 +32,8 @@ #include "kfd_mqd_manager.h" +#define VMID_NUM 16 + struct device_process_node { struct qcm_process_device *qpd; struct list_head list; @@ -185,7 +187,8 @@ struct device_queue_manager { unsigned int *allocated_queues; uint64_t sdma_bitmap; uint64_t xgmi_sdma_bitmap; - unsigned int vmid_bitmap; + /* the pasid mapping for each kfd vmid */ + uint16_t vmid_pasid[VMID_NUM]; uint64_t pipelines_addr; struct kfd_mem_obj *pipeline_mem; uint64_t fence_gpu_addr; @@ -198,6 +201,7 @@ struct device_queue_manager { bool is_hws_hang; struct work_struct hw_exception_work; struct kfd_mem_obj hiq_sdma_mqd; + bool sched_running; }; void device_queue_manager_init_cik( diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index d674d4b3340f..908081c85de1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -852,8 +852,8 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p, if (type == KFD_EVENT_TYPE_MEMORY) { dev_warn(kfd_device, - "Sending SIGSEGV to HSA Process with PID %d ", - p->lead_thread->pid); + "Sending SIGSEGV to process %d (pasid 0x%x)", + p->lead_thread->pid, p->pasid); send_sig(SIGSEGV, p->lead_thread, 0); } @@ -861,13 +861,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p, if (send_signal) { if (send_sigterm) { dev_warn(kfd_device, - "Sending SIGTERM to HSA Process with PID %d ", - p->lead_thread->pid); + "Sending SIGTERM to process %d (pasid 0x%x)", + p->lead_thread->pid, p->pasid); send_sig(SIGTERM, p->lead_thread, 0); } else { dev_err(kfd_device, - "HSA Process (PID %d) got unhandled exception", - p->lead_thread->pid); + "Process %d (pasid 0x%x) got unhandled exception", + p->lead_thread->pid, p->pasid); } } } @@ -936,7 +936,8 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid, /* Workaround on Raven to not kill the process when memory is freed * before IOMMU is able to finish processing all the excessive PPRs */ - if (dev->device_info->asic_family != CHIP_RAVEN) { + if (dev->device_info->asic_family != CHIP_RAVEN && + dev->device_info->asic_family != CHIP_RENOIR) { mutex_lock(&p->event_mutex); /* Lookup events by type and signal them */ 
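
The device queue manager rework above drops the old vmid_bitmap in favour of a per-VMID pasid table (vmid_pasid[VMID_NUM]), so a VMID is free exactly when its slot holds pasid 0. The stand-alone sketch below only models that allocation scheme; the function signatures, the VMID range and the userspace harness are assumptions made for illustration, not the driver's actual interfaces.

	/* Illustrative stand-alone model of the vmid -> pasid table used above.
	 * Names and the VMID range are placeholders for the sketch, not the
	 * driver's real helpers.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define VMID_NUM 16

	static uint16_t vmid_pasid[VMID_NUM];	/* 0 means the VMID is free */

	/* Allocate a free VMID in [first, last] and bind it to @pasid. */
	static int allocate_vmid(unsigned int first, unsigned int last,
				 uint16_t pasid)
	{
		unsigned int i;

		for (i = first; i <= last && i < VMID_NUM; i++) {
			if (!vmid_pasid[i]) {
				vmid_pasid[i] = pasid;
				return (int)i;
			}
		}
		return -1;	/* no VMID left; the driver returns -ENOSPC here */
	}

	static void deallocate_vmid(unsigned int vmid)
	{
		if (vmid < VMID_NUM)
			vmid_pasid[vmid] = 0;	/* release the mapping */
	}

	int main(void)
	{
		int vmid = allocate_vmid(8, 15, 0x1234);

		printf("pasid 0x%x bound to vmid %d\n", 0x1234, vmid);
		if (vmid >= 0)
			deallocate_vmid((unsigned int)vmid);
		return 0;
	}

Keeping the mapping as a table rather than a bitmap also lets the interrupt path translate a faulting VMID straight to its PASID, which is what the kfd_int_process_v9.c hunk below relies on.
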
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c index 9dc4bff8085e..bb77b8890e77 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c @@ -369,8 +369,13 @@ int kfd_init_apertures(struct kfd_process *process) /*Iterating over all devices*/ while (kfd_topology_enum_kfd_devices(id, &dev) == 0) { - if (!dev) { - id++; /* Skip non GPU devices */ + if (!dev || kfd_devcgroup_check_permission(dev)) { + /* Skip non GPU devices and devices to which the + * current process have no access to. Access can be + * limited by placing the process in a specific + * cgroup hierarchy + */ + id++; continue; } @@ -405,8 +410,11 @@ int kfd_init_apertures(struct kfd_process *process) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: kfd_init_apertures_v9(pdd, id); break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 3ef67d2e0d9f..e05d75ecda21 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -54,8 +54,7 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev, memcpy(patched_ihre, ih_ring_entry, dev->device_info->ih_ring_entry_size); - pasid = dev->kfd2kgd->get_atc_vmid_pasid_mapping_pasid( - dev->kgd, vmid); + pasid = dev->dqm->vmid_pasid[vmid]; /* Patch the pasid field */ patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3]) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c index c56ac47cd318..bc47f6a44456 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c @@ -62,6 +62,11 @@ int kfd_interrupt_init(struct kfd_dev *kfd) } kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI, 1); + if (unlikely(!kfd->ih_wq)) { + kfifo_free(&kfd->ih_fifo); + dev_err(kfd_chardev(), "Failed to allocate KFD IH workqueue\n"); + return -ENOMEM; + } spin_lock_init(&kfd->interrupt_lock); INIT_WORK(&kfd->interrupt_work, interrupt_wq); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 5f35df23fb18..193e2835bd4d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -160,7 +160,7 @@ static void iommu_pasid_shutdown_callback(struct pci_dev *pdev, int pasid) if (!p) return; - pr_debug("Unbinding process %d from IOMMU\n", pasid); + pr_debug("Unbinding process 0x%x from IOMMU\n", pasid); mutex_lock(kfd_get_dbgmgr_mutex()); @@ -194,7 +194,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid, struct kfd_dev *dev; dev_warn_ratelimited(kfd_device, - "Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X", + "Invalid PPR device %x:%x.%x pasid 0x%x address 0x%lX flags 0x%X", PCI_BUS_NUM(pdev->devfn), PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), @@ -235,7 +235,7 @@ static int kfd_bind_processes_to_device(struct kfd_dev *kfd) err = amd_iommu_bind_pasid(kfd->pdev, p->pasid, p->lead_thread); if (err < 0) { - pr_err("Unexpected pasid %d binding failure\n", + pr_err("Unexpected pasid 0x%x binding failure\n", p->pasid); mutex_unlock(&p->mutex); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c index 8b4564f71a7a..11d244891393 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c @@ -330,10 +330,13 @@ struct kernel_queue *kernel_queue_init(struct kfd_dev *dev, case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: kernel_queue_init_v9(&kq->ops_asic_specific); break; case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: kernel_queue_init_v10(&kq->ops_asic_specific); break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 986ff52d5750..f4b7f7e6c40e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -82,7 +82,7 @@ static void kfd_exit(void) kfd_chardev_exit(); } -int kgd2kfd_init() +int kgd2kfd_init(void) { return kfd_init(); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 9cd3eb2d90bd..4a236b2c2354 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -69,35 +69,13 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, struct queue_properties *q) { - int retval; - struct kfd_mem_obj *mqd_mem_obj = NULL; + struct kfd_mem_obj *mqd_mem_obj; - /* From V9, for CWSR, the control stack is located on the next page - * boundary after the mqd, we will use the gtt allocation function - * instead of sub-allocation function. - */ - if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) { - mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO); - if (!mqd_mem_obj) - return NULL; - retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->kgd, - ALIGN(q->ctl_stack_size, PAGE_SIZE) + - ALIGN(sizeof(struct v10_compute_mqd), PAGE_SIZE), - &(mqd_mem_obj->gtt_mem), - &(mqd_mem_obj->gpu_addr), - (void *)&(mqd_mem_obj->cpu_ptr), true); - } else { - retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd), - &mqd_mem_obj); - } - - if (retval) { - kfree(mqd_mem_obj); + if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd), + &mqd_mem_obj)) return NULL; - } return mqd_mem_obj; - } static void init_mqd(struct mqd_manager *mm, void **mqd, @@ -250,14 +228,7 @@ static int destroy_mqd(struct mqd_manager *mm, void *mqd, static void free_mqd(struct mqd_manager *mm, void *mqd, struct kfd_mem_obj *mqd_mem_obj) { - struct kfd_dev *kfd = mm->dev; - - if (mqd_mem_obj->gtt_mem) { - amdgpu_amdkfd_free_gtt_mem(kfd->kgd, mqd_mem_obj->gtt_mem); - kfree(mqd_mem_obj); - } else { - kfd_gtt_sa_free(mm->dev, mqd_mem_obj); - } + kfd_gtt_sa_free(mm->dev, mqd_mem_obj); } static bool is_occupied(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 2c8624c5b42c..83ef4b3dd2fb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -239,10 +239,13 @@ int pm_init(struct packet_manager *pm, struct device_queue_manager *dqm) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: pm->pmf = &kfd_v9_pm_funcs; break; case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: pm->pmf = &kfd_v10_pm_funcs; break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index c89326125d71..060a9e8b301e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -36,6 +36,10 @@ #include <linux/seq_file.h> #include <linux/kref.h> #include 
<linux/sysfs.h> +#include <linux/device_cgroup.h> +#include <drm/drm_file.h> +#include <drm/drm_drv.h> +#include <drm/drm_device.h> #include <kgd_kfd_interface.h> #include "amd_shared.h" @@ -179,10 +183,6 @@ enum cache_policy { cache_policy_noncoherent }; -#define KFD_IS_VI(chip) ((chip) >= CHIP_CARRIZO && (chip) <= CHIP_POLARIS11) -#define KFD_IS_DGPU(chip) (((chip) >= CHIP_TONGA && \ - (chip) <= CHIP_NAVI10) || \ - (chip) == CHIP_HAWAII) #define KFD_IS_SOC15(chip) ((chip) >= CHIP_VEGA10) struct kfd_event_interrupt_class { @@ -230,6 +230,7 @@ struct kfd_dev { const struct kfd_device_info *device_info; struct pci_dev *pdev; + struct drm_device *ddev; unsigned int id; /* topology stub index */ @@ -687,7 +688,7 @@ struct kfd_process { /* We want to receive a notification when the mm_struct is destroyed */ struct mmu_notifier mmu_notifier; - unsigned int pasid; + uint16_t pasid; unsigned int doorbell_index; /* @@ -1040,6 +1041,21 @@ bool kfd_is_locked(void); void kfd_inc_compute_active(struct kfd_dev *dev); void kfd_dec_compute_active(struct kfd_dev *dev); +/* Cgroup Support */ +/* Check with device cgroup if @kfd device is accessible */ +static inline int kfd_devcgroup_check_permission(struct kfd_dev *kfd) +{ +#if defined(CONFIG_CGROUP_DEVICE) + struct drm_device *ddev = kfd->ddev; + + return devcgroup_check_permission(DEVCG_DEV_CHAR, ddev->driver->major, + ddev->render->index, + DEVCG_ACC_WRITE | DEVCG_ACC_READ); +#else + return 0; +#endif +} + /* Debugfs */ #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 40e3fc0c6942..10f9af5784f2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -416,7 +416,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p) list_for_each_entry_safe(pdd, temp, &p->per_device_data, per_device_list) { - pr_debug("Releasing pdd (topology id %d) for process (pasid %d)\n", + pr_debug("Releasing pdd (topology id %d) for process (pasid 0x%x)\n", pdd->dev->id, p->pasid); if (pdd->drm_file) { @@ -687,6 +687,8 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, struct kfd_dev *dev) { unsigned int i; + int range_start = dev->shared_resources.non_cp_doorbells_start; + int range_end = dev->shared_resources.non_cp_doorbells_end; if (!KFD_IS_SOC15(dev->device_info->asic_family)) return 0; @@ -698,14 +700,16 @@ static int init_doorbell_bitmap(struct qcm_process_device *qpd, return -ENOMEM; /* Mask out doorbells reserved for SDMA, IH, and VCN on SOC15. 
*/ + pr_debug("reserved doorbell 0x%03x - 0x%03x\n", range_start, range_end); + pr_debug("reserved doorbell 0x%03x - 0x%03x\n", + range_start + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, + range_end + KFD_QUEUE_DOORBELL_MIRROR_OFFSET); + for (i = 0; i < KFD_MAX_NUM_OF_QUEUES_PER_PROCESS / 2; i++) { - if (i >= dev->shared_resources.non_cp_doorbells_start - && i <= dev->shared_resources.non_cp_doorbells_end) { + if (i >= range_start && i <= range_end) { set_bit(i, qpd->doorbell_bitmap); set_bit(i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET, qpd->doorbell_bitmap); - pr_debug("reserved doorbell 0x%03x and 0x%03x\n", i, - i + KFD_QUEUE_DOORBELL_MIRROR_OFFSET); } } @@ -1020,7 +1024,7 @@ static void evict_process_worker(struct work_struct *work) */ flush_delayed_work(&p->restore_work); - pr_debug("Started evicting pasid %d\n", p->pasid); + pr_debug("Started evicting pasid 0x%x\n", p->pasid); ret = kfd_process_evict_queues(p); if (!ret) { dma_fence_signal(p->ef); @@ -1029,9 +1033,9 @@ static void evict_process_worker(struct work_struct *work) queue_delayed_work(kfd_restore_wq, &p->restore_work, msecs_to_jiffies(PROCESS_RESTORE_TIME_MS)); - pr_debug("Finished evicting pasid %d\n", p->pasid); + pr_debug("Finished evicting pasid 0x%x\n", p->pasid); } else - pr_err("Failed to evict queues of pasid %d\n", p->pasid); + pr_err("Failed to evict queues of pasid 0x%x\n", p->pasid); } static void restore_process_worker(struct work_struct *work) @@ -1046,7 +1050,7 @@ static void restore_process_worker(struct work_struct *work) * lifetime of this thread, kfd_process p will be valid */ p = container_of(dwork, struct kfd_process, restore_work); - pr_debug("Started restoring pasid %d\n", p->pasid); + pr_debug("Started restoring pasid 0x%x\n", p->pasid); /* Setting last_restore_timestamp before successful restoration. 
* Otherwise this would have to be set by KGD (restore_process_bos) @@ -1062,7 +1066,7 @@ static void restore_process_worker(struct work_struct *work) ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, &p->ef); if (ret) { - pr_debug("Failed to restore BOs of pasid %d, retry after %d ms\n", + pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n", p->pasid, PROCESS_BACK_OFF_TIME_MS); ret = queue_delayed_work(kfd_restore_wq, &p->restore_work, msecs_to_jiffies(PROCESS_BACK_OFF_TIME_MS)); @@ -1072,9 +1076,9 @@ static void restore_process_worker(struct work_struct *work) ret = kfd_process_restore_queues(p); if (!ret) - pr_debug("Finished restoring pasid %d\n", p->pasid); + pr_debug("Finished restoring pasid 0x%x\n", p->pasid); else - pr_err("Failed to restore queues of pasid %d\n", p->pasid); + pr_err("Failed to restore queues of pasid 0x%x\n", p->pasid); } void kfd_suspend_all_processes(void) @@ -1088,7 +1092,7 @@ void kfd_suspend_all_processes(void) cancel_delayed_work_sync(&p->restore_work); if (kfd_process_evict_queues(p)) - pr_err("Failed to suspend process %d\n", p->pasid); + pr_err("Failed to suspend process 0x%x\n", p->pasid); dma_fence_signal(p->ef); dma_fence_put(p->ef); p->ef = NULL; @@ -1171,7 +1175,7 @@ int kfd_debugfs_mqds_by_process(struct seq_file *m, void *data) int idx = srcu_read_lock(&kfd_processes_srcu); hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) { - seq_printf(m, "Process %d PASID %d:\n", + seq_printf(m, "Process %d PASID 0x%x:\n", p->lead_thread->tgid, p->pasid); mutex_lock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 7e6c3ee82f5b..2659d226c056 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -53,7 +53,7 @@ static int find_available_queue_slot(struct process_queue_manager *pqm, pr_debug("The new slot id %lu\n", found); if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) { - pr_info("Cannot open more queues for process with pasid %d\n", + pr_info("Cannot open more queues for process with pasid 0x%x\n", pqm->process->pasid); return -ENOMEM; } @@ -298,7 +298,7 @@ int pqm_create_queue(struct process_queue_manager *pqm, } if (retval != 0) { - pr_err("Pasid %d DQM create queue %d failed. ret %d\n", + pr_err("Pasid 0x%x DQM create queue %d failed. 
ret %d\n", pqm->process->pasid, type, retval); goto err_create_queue; } @@ -377,7 +377,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid) dqm = pqn->q->device->dqm; retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q); if (retval) { - pr_err("Pasid %d destroy queue %d failed, ret %d\n", + pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n", pqm->process->pasid, pqn->q->properties.queue_id, retval); if (retval != -ETIME) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 7551761f2aa9..69bd0628fdc6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -269,6 +269,8 @@ static ssize_t iolink_show(struct kobject *kobj, struct attribute *attr, buffer[0] = 0; iolink = container_of(attr, struct kfd_iolink_properties, attr); + if (iolink->gpu && kfd_devcgroup_check_permission(iolink->gpu)) + return -EPERM; sysfs_show_32bit_prop(buffer, "type", iolink->iolink_type); sysfs_show_32bit_prop(buffer, "version_major", iolink->ver_maj); sysfs_show_32bit_prop(buffer, "version_minor", iolink->ver_min); @@ -305,6 +307,8 @@ static ssize_t mem_show(struct kobject *kobj, struct attribute *attr, buffer[0] = 0; mem = container_of(attr, struct kfd_mem_properties, attr); + if (mem->gpu && kfd_devcgroup_check_permission(mem->gpu)) + return -EPERM; sysfs_show_32bit_prop(buffer, "heap_type", mem->heap_type); sysfs_show_64bit_prop(buffer, "size_in_bytes", mem->size_in_bytes); sysfs_show_32bit_prop(buffer, "flags", mem->flags); @@ -334,6 +338,8 @@ static ssize_t kfd_cache_show(struct kobject *kobj, struct attribute *attr, buffer[0] = 0; cache = container_of(attr, struct kfd_cache_properties, attr); + if (cache->gpu && kfd_devcgroup_check_permission(cache->gpu)) + return -EPERM; sysfs_show_32bit_prop(buffer, "processor_id_low", cache->processor_id_low); sysfs_show_32bit_prop(buffer, "level", cache->cache_level); @@ -414,6 +420,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, if (strcmp(attr->name, "gpu_id") == 0) { dev = container_of(attr, struct kfd_topology_device, attr_gpuid); + if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) + return -EPERM; return sysfs_show_32bit_val(buffer, dev->gpu_id); } @@ -421,11 +429,15 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev = container_of(attr, struct kfd_topology_device, attr_name); + if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) + return -EPERM; return sysfs_show_str_val(buffer, dev->node_props.name); } dev = container_of(attr, struct kfd_topology_device, attr_props); + if (dev->gpu && kfd_devcgroup_check_permission(dev->gpu)) + return -EPERM; sysfs_show_32bit_prop(buffer, "cpu_cores_count", dev->node_props.cpu_cores_count); sysfs_show_32bit_prop(buffer, "simd_count", @@ -1098,6 +1110,9 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) { struct kfd_topology_device *dev; struct kfd_topology_device *out_dev = NULL; + struct kfd_mem_properties *mem; + struct kfd_cache_properties *cache; + struct kfd_iolink_properties *iolink; down_write(&topology_lock); list_for_each_entry(dev, &topology_device_list, list) { @@ -1111,6 +1126,13 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu) if (!dev->gpu && (dev->node_props.simd_count > 0)) { dev->gpu = gpu; out_dev = dev; + + list_for_each_entry(mem, &dev->mem_props, list) + mem->gpu = dev->gpu; + list_for_each_entry(cache, &dev->cache_props, list) + cache->gpu = dev->gpu; + 
list_for_each_entry(iolink, &dev->io_link_props, list) + iolink->gpu = dev->gpu; break; } } @@ -1317,8 +1339,11 @@ int kfd_topology_add_device(struct kfd_dev *gpu) case CHIP_VEGA12: case CHIP_VEGA20: case CHIP_RAVEN: + case CHIP_RENOIR: case CHIP_ARCTURUS: case CHIP_NAVI10: + case CHIP_NAVI12: + case CHIP_NAVI14: dev->node_props.capability |= ((HSA_CAP_DOORBELL_TYPE_2_0 << HSA_CAP_DOORBELL_TYPE_TOTALBITS_SHIFT) & HSA_CAP_DOORBELL_TYPE_TOTALBITS_MASK); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h index d4718d58d0f2..15843e0fc756 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h @@ -102,6 +102,7 @@ struct kfd_mem_properties { uint32_t flags; uint32_t width; uint32_t mem_clk_max; + struct kfd_dev *gpu; struct kobject *kobj; struct attribute attr; }; @@ -123,6 +124,7 @@ struct kfd_cache_properties { uint32_t cache_latency; uint32_t cache_type; uint8_t sibling_map[CRAT_SIBLINGMAP_SIZE]; + struct kfd_dev *gpu; struct kobject *kobj; struct attribute attr; }; @@ -141,6 +143,7 @@ struct kfd_iolink_properties { uint32_t max_bandwidth; uint32_t rec_transfer_size; uint32_t flags; + struct kfd_dev *gpu; struct kobject *kobj; struct attribute attr; }; diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig index 71991a28a775..313183b80032 100644 --- a/drivers/gpu/drm/amd/display/Kconfig +++ b/drivers/gpu/drm/amd/display/Kconfig @@ -23,16 +23,16 @@ config DRM_AMD_DC_DCN2_0 depends on DRM_AMD_DC && X86 depends on DRM_AMD_DC_DCN1_0 help - Choose this option if you want to have - Navi support for display engine + Choose this option if you want to have + Navi support for display engine config DRM_AMD_DC_DCN2_1 - bool "DCN 2.1 family" - depends on DRM_AMD_DC && X86 - depends on DRM_AMD_DC_DCN2_0 - help - Choose this option if you want to have - Renoir support for display engine + bool "DCN 2.1 family" + depends on DRM_AMD_DC && X86 + depends on DRM_AMD_DC_DCN2_0 + help + Choose this option if you want to have + Renoir support for display engine config DRM_AMD_DC_DSC_SUPPORT bool "DSC support" @@ -41,8 +41,16 @@ config DRM_AMD_DC_DSC_SUPPORT depends on DRM_AMD_DC_DCN1_0 depends on DRM_AMD_DC_DCN2_0 help - Choose this option if you want to have - Dynamic Stream Compression support + Choose this option if you want to have + Dynamic Stream Compression support + +config DRM_AMD_DC_HDCP + bool "Enable HDCP support in DC" + depends on DRM_AMD_DC + help + Choose this option + if you want to support + HDCP authentication config DEBUG_KERNEL_DC bool "Enable kgdb break in DC" diff --git a/drivers/gpu/drm/amd/display/Makefile b/drivers/gpu/drm/amd/display/Makefile index 496cee000f10..36b3d6a5d04d 100644 --- a/drivers/gpu/drm/amd/display/Makefile +++ b/drivers/gpu/drm/amd/display/Makefile @@ -34,12 +34,19 @@ subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/freesync subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/color subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/info_packet subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/power +ifdef CONFIG_DRM_AMD_DC_HDCP +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/modules/hdcp +endif #TODO: remove when Timing Sync feature is complete subdir-ccflags-y += -DBUILD_FEATURE_TIMING_SYNC=0 DAL_LIBS = amdgpu_dm dc modules/freesync modules/color modules/info_packet modules/power +ifdef CONFIG_DRM_AMD_DC_HDCP +DAL_LIBS += modules/hdcp +endif + AMD_DAL = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/,$(DAL_LIBS))) 
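
Several of the topology sysfs handlers above now bail out with -EPERM when the caller's device cgroup is not allowed to use the GPU behind the node. The sketch below is a minimal userspace model of that gating pattern only; the structure and helper names are simplified stand-ins, not the kernel's kfd_devcgroup_check_permission() implementation.

	/* Stand-alone model of the device-cgroup gating added to the topology
	 * sysfs show handlers above.  All names here are assumptions for the
	 * example, not kernel symbols.
	 */
	#include <errno.h>
	#include <stddef.h>
	#include <stdio.h>

	struct gpu_dev {
		int allowed_by_cgroup;	/* stands in for the cgroup decision */
	};

	struct mem_properties {
		struct gpu_dev *gpu;	/* NULL for CPU-only topology nodes */
		unsigned int heap_type;
	};

	/* Returns 0 when the caller's cgroup may access the GPU, -EPERM otherwise. */
	static int devcgroup_check_permission(const struct gpu_dev *gpu)
	{
		return gpu->allowed_by_cgroup ? 0 : -EPERM;
	}

	static int mem_show(const struct mem_properties *mem, char *buf, size_t len)
	{
		/* GPU-backed properties are hidden from denied processes;
		 * CPU nodes (gpu == NULL) stay visible to everyone. */
		if (mem->gpu && devcgroup_check_permission(mem->gpu))
			return -EPERM;

		return snprintf(buf, len, "heap_type %u\n", mem->heap_type);
	}

	int main(void)
	{
		struct gpu_dev gpu = { .allowed_by_cgroup = 0 };
		struct mem_properties mem = { .gpu = &gpu, .heap_type = 1 };
		char buf[64];

		printf("mem_show returned %d\n", mem_show(&mem, buf, sizeof(buf)));
		return 0;
	}

In the real driver the check collapses to a no-op when CONFIG_CGROUP_DEVICE is not set, matching the #if defined(CONFIG_CGROUP_DEVICE) fallback in the kfd_priv.h hunk above.
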
include $(AMD_DAL) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile index 94911871eb9b..9a3b7bf8ab0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -31,6 +31,10 @@ ifneq ($(CONFIG_DRM_AMD_DC),) AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o endif +ifdef CONFIG_DRM_AMD_DC_HDCP +AMDGPUDM += amdgpu_dm_hdcp.o +endif + ifneq ($(CONFIG_DEBUG_FS),) AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4139f129eafb..7aac9568d3be 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -37,6 +37,9 @@ #include "amdgpu_ucode.h" #include "atom.h" #include "amdgpu_dm.h" +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "amdgpu_dm_hdcp.h" +#endif #include "amdgpu_pm.h" #include "amd_shared.h" @@ -67,6 +70,7 @@ #include <drm/drm_edid.h> #include <drm/drm_vblank.h> #include <drm/drm_audio_component.h> +#include <drm/drm_hdcp.h> #if defined(CONFIG_DRM_AMD_DC_DCN1_0) #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" @@ -143,6 +147,12 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, static void handle_cursor_update(struct drm_plane *plane, struct drm_plane_state *old_plane_state); +static void amdgpu_dm_set_psr_caps(struct dc_link *link); +static bool amdgpu_dm_psr_enable(struct dc_stream_state *stream); +static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); +static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); + + /* * dm_vblank_get_counter * @@ -263,6 +273,13 @@ static inline bool amdgpu_dm_vrr_active(struct dm_crtc_state *dm_state) dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; } +/** + * dm_pflip_high_irq() - Handle pageflip interrupt + * @interrupt_params: ignored + * + * Handles the pageflip interrupt by notifying all interested parties + * that the pageflip has been completed. + */ static void dm_pflip_high_irq(void *interrupt_params) { struct amdgpu_crtc *amdgpu_crtc; @@ -407,6 +424,13 @@ static void dm_vupdate_high_irq(void *interrupt_params) } } +/** + * dm_crtc_high_irq() - Handles CRTC interrupt + * @interrupt_params: ignored + * + * Handles the CRTC/VSYNC interrupt by notfying DRM's VBLANK + * event handler. 
+ */ static void dm_crtc_high_irq(void *interrupt_params) { struct common_irq_params *irq_params = interrupt_params; @@ -646,11 +670,18 @@ void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) static int amdgpu_dm_init(struct amdgpu_device *adev) { struct dc_init_data init_data; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct dc_callback_init init_params; +#endif + adev->dm.ddev = adev->ddev; adev->dm.adev = adev; /* Zero all the fields */ memset(&init_data, 0, sizeof(init_data)); +#ifdef CONFIG_DRM_AMD_DC_HDCP + memset(&init_params, 0, sizeof(init_params)); +#endif mutex_init(&adev->dm.dc_lock); mutex_init(&adev->dm.audio_lock); @@ -697,6 +728,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) init_data.flags.multi_mon_pp_mclk_switch = true; + if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) + init_data.flags.disable_fractional_pwm = true; + init_data.flags.power_down_display_on_boot = true; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 @@ -713,6 +747,8 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; } + dc_hardware_init(adev->dm.dc); + adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); if (!adev->dm.freesync_module) { DRM_ERROR( @@ -723,6 +759,18 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) amdgpu_dm_init_color_mod(); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) { + adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc); + + if (!adev->dm.hdcp_workqueue) + DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); + else + DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); + + dc_init_callbacks(adev->dm.dc, &init_params); + } +#endif if (amdgpu_dm_initialize_drm_device(adev)) { DRM_ERROR( "amdgpu: failed to initialize sw for display support.\n"); @@ -764,6 +812,16 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev) amdgpu_dm_destroy_drm_device(&adev->dm); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->dm.hdcp_workqueue) { + hdcp_destroy(adev->dm.hdcp_workqueue); + adev->dm.hdcp_workqueue = NULL; + } + + if (adev->dm.dc) + dc_deinit_callbacks(adev->dm.dc); +#endif + /* DC Destroy TODO: Replace destroy DAL */ if (adev->dm.dc) dc_destroy(&adev->dm.dc); @@ -897,27 +955,29 @@ static int detect_mst_link_for_all_connectors(struct drm_device *dev) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; int ret = 0; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type == dc_connection_mst_branch && aconnector->mst_mgr.aux) { DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", - aconnector, aconnector->base.base.id); + aconnector, + aconnector->base.base.id); ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); if (ret < 0) { DRM_ERROR("DM_MST: Failed to start MST\n"); - ((struct dc_link *)aconnector->dc_link)->type = dc_connection_single; - return ret; - } + aconnector->dc_link->type = + dc_connection_single; + break; } + } } + drm_connector_list_iter_end(&iter); - drm_modeset_unlock(&dev->mode_config.connection_mutex); return ret; } @@ -940,6 +1000,11 @@ static int dm_late_init(void *handle) params.backlight_lut_array_size = 16; 
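Several hunks in this file replace open-coded walks of dev->mode_config.connector_list, taken under connection_mutex, with the drm_connector_list_iter API, which handles locking and connector references internally. A minimal sketch of that pattern, with the per-connector work left as a placeholder:

#include <drm/drm_connector.h>
#include <drm/drm_device.h>

static void example_walk_connectors(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter iter;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		/* per-connector work goes here */
	}
	drm_connector_list_iter_end(&iter);
}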
params.backlight_lut_array = linear_lut; + /* Min backlight level after ABM reduction, Don't allow below 1% + * 0xFFFF x 0.01 = 0x28F + */ + params.min_abm_backlight = 0x28F; + /* todo will enable for navi10 */ if (adev->asic_type <= CHIP_RAVEN) { ret = dmcu_load_iram(dmcu, params); @@ -955,14 +1020,13 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) { struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct drm_dp_mst_topology_mgr *mgr; int ret; bool need_hotplug = false; - drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); - - list_for_each_entry(connector, &dev->mode_config.connector_list, - head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); if (aconnector->dc_link->type != dc_connection_mst_branch || aconnector->mst_port) @@ -973,15 +1037,14 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) if (suspend) { drm_dp_mst_topology_mgr_suspend(mgr); } else { - ret = drm_dp_mst_topology_mgr_resume(mgr); + ret = drm_dp_mst_topology_mgr_resume(mgr, true); if (ret < 0) { drm_dp_mst_topology_mgr_set_mst(mgr, false); need_hotplug = true; } } } - - drm_modeset_unlock(&dev->mode_config.connection_mutex); + drm_connector_list_iter_end(&iter); if (need_hotplug) drm_kms_helper_hotplug_event(dev); @@ -989,7 +1052,7 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) /** * dm_hw_init() - Initialize DC device - * @handle: The base driver device containing the amdpgu_dm device. + * @handle: The base driver device containing the amdgpu_dm device. * * Initialize the &struct amdgpu_display_manager device. This involves calling * the initializers of each DM component, then populating the struct with them. @@ -1019,7 +1082,7 @@ static int dm_hw_init(void *handle) /** * dm_hw_fini() - Teardown DC device - * @handle: The base driver device containing the amdpgu_dm device. + * @handle: The base driver device containing the amdgpu_dm device. * * Teardown components within &struct amdgpu_display_manager that require * cleanup. 
This involves cleaning up the DRM device, DC, and any modules that @@ -1163,6 +1226,7 @@ static int dm_resume(void *handle) struct amdgpu_display_manager *dm = &adev->dm; struct amdgpu_dm_connector *aconnector; struct drm_connector *connector; + struct drm_connector_list_iter iter; struct drm_crtc *crtc; struct drm_crtc_state *new_crtc_state; struct dm_crtc_state *dm_new_crtc_state; @@ -1185,17 +1249,18 @@ static int dm_resume(void *handle) /* program HPD filter */ dc_resume(dm->dc); - /* On resume we need to rewrite the MSTM control bits to enamble MST*/ - s3_handle_mst(ddev, false); - /* * early enable HPD Rx IRQ, should be done before set mode as short * pulse interrupts are used for MST */ amdgpu_dm_irq_resume_early(adev); + /* On resume we need to rewrite the MSTM control bits to enable MST*/ + s3_handle_mst(ddev, false); + /* Do detection*/ - list_for_each_entry(connector, &ddev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(ddev, &iter); + drm_for_each_connector_iter(connector, &iter) { aconnector = to_amdgpu_dm_connector(connector); /* @@ -1223,6 +1288,7 @@ static int dm_resume(void *handle) amdgpu_dm_update_connector_after_detect(aconnector); mutex_unlock(&aconnector->hpd_lock); } + drm_connector_list_iter_end(&iter); /* Force mode set in atomic commit */ for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) @@ -1438,6 +1504,11 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector) dc_sink_release(aconnector->dc_sink); aconnector->dc_sink = NULL; aconnector->edid = NULL; +#ifdef CONFIG_DRM_AMD_DC_HDCP + /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ + if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; +#endif } mutex_unlock(&dev->mode_config.mutex); @@ -1452,6 +1523,9 @@ static void handle_hpd_irq(void *param) struct drm_connector *connector = &aconnector->base; struct drm_device *dev = connector->dev; enum dc_connection_type new_connection_type = dc_connection_none; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct amdgpu_device *adev = dev->dev_private; +#endif /* * In case of failure or MST no need to update connector status or notify the OS @@ -1459,6 +1533,10 @@ static void handle_hpd_irq(void *param) */ mutex_lock(&aconnector->hpd_lock); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) + hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); +#endif if (aconnector->fake_enable) aconnector->fake_enable = false; @@ -1577,6 +1655,12 @@ static void handle_hpd_rx_irq(void *param) struct dc_link *dc_link = aconnector->dc_link; bool is_mst_root_connector = aconnector->mst_mgr.mst_state; enum dc_connection_type new_connection_type = dc_connection_none; +#ifdef CONFIG_DRM_AMD_DC_HDCP + union hpd_irq_data hpd_irq_data; + struct amdgpu_device *adev = dev->dev_private; + + memset(&hpd_irq_data, 0, sizeof(hpd_irq_data)); +#endif /* * TODO:Temporary add mutex to protect hpd interrupt not have a gpio @@ -1586,7 +1670,12 @@ static void handle_hpd_rx_irq(void *param) if (dc_link->type != dc_connection_mst_branch) mutex_lock(&aconnector->hpd_lock); + +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, NULL) && +#else if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) && +#endif !is_mst_root_connector) { /* Downstream Port status changed. 
*/ if (!dc_link_detect_sink(dc_link, &new_connection_type)) @@ -1621,6 +1710,10 @@ static void handle_hpd_rx_irq(void *param) drm_kms_helper_hotplug_event(dev); } } +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) + hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); +#endif if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) || (dc_link->type == dc_connection_mst_branch)) dm_handle_hpd_rx_irq(aconnector); @@ -2334,6 +2427,8 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) } else if (dc_link_detect(link, DETECT_REASON_BOOT)) { amdgpu_dm_update_connector_after_detect(aconnector); register_backlight_device(dm, link); + if (amdgpu_dc_feature_mask & DC_PSR_MASK) + amdgpu_dm_set_psr_caps(link); } @@ -3311,8 +3406,12 @@ static void fill_stream_properties_from_drm_display_mode( { struct dc_crtc_timing *timing_out = &stream->timing; const struct drm_display_info *info = &connector->display_info; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct hdmi_vendor_infoframe hv_frame; + struct hdmi_avi_infoframe avi_frame; - memset(timing_out, 0, sizeof(struct dc_crtc_timing)); + memset(&hv_frame, 0, sizeof(hv_frame)); + memset(&avi_frame, 0, sizeof(avi_frame)); timing_out->h_border_left = 0; timing_out->h_border_right = 0; @@ -3322,6 +3421,9 @@ static void fill_stream_properties_from_drm_display_mode( if (drm_mode_is_420_only(info, mode_in) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if (drm_mode_is_420_also(info, mode_in) + && aconnector->force_yuv420_output) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444) && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; @@ -3346,6 +3448,13 @@ static void fill_stream_properties_from_drm_display_mode( timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; } + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); + timing_out->vic = avi_frame.video_code; + drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); + timing_out->hdmi_vic = hv_frame.vic; + } + timing_out->h_addressable = mode_in->crtc_hdisplay; timing_out->h_total = mode_in->crtc_htotal; timing_out->h_sync_width = @@ -3566,6 +3675,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, stream->dm_stream_context = aconnector; + stream->timing.flags.LTE_340MCSC_SCRAMBLE = + drm_connector->display_info.hdmi.scdc.scrambling.low_rates; + list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { /* Search for preferred mode */ if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { @@ -3621,8 +3733,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, dc_link_get_link_cap(aconnector->dc_link)); if (dsc_caps.is_dsc_supported) - if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc, + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], &dsc_caps, + aconnector->dc_link->ctx->dc->debug.dsc_min_slice_height_override, link_bandwidth_kbps, &stream->timing, &stream->timing.dsc_cfg)) @@ -3639,6 +3752,18 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, update_stream_signal(stream, sink); + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) + mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket, 
false, false); + if (stream->link->psr_feature_enabled) { + struct dc *core_dc = stream->link->ctx->dc; + + if (dc_is_dmcu_initialized(core_dc)) { + struct dmcu *dmcu = core_dc->res_pool->dmcu; + + stream->psr_version = dmcu->dmcu_version.psr_version; + mod_build_vsc_infopacket(stream, &stream->vsc_infopacket); + } + } finish: dc_sink_release(sink); @@ -4114,8 +4239,8 @@ enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connec result = MODE_OK; else DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d\n", - mode->vdisplay, mode->hdisplay, + mode->vdisplay, mode->clock, dc_result); @@ -4494,7 +4619,7 @@ static int dm_plane_helper_prepare_fb(struct drm_plane *plane, tv.num_shared = 1; list_add(&tv.head, &list); - r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL, true); + r = ttm_eu_reserve_buffers(&ticket, &list, false, NULL); if (r) { dev_err(adev->dev, "fail to reserve bo (%d)\n", r); return r; @@ -4837,7 +4962,13 @@ static int to_drm_connector_type(enum signal_type st) static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) { - return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]); + struct drm_encoder *encoder; + + /* There is only one encoder per connector */ + drm_connector_for_each_possible_encoder(connector, encoder) + return encoder; + + return NULL; } static void amdgpu_dm_get_native_mode(struct drm_connector *connector) @@ -5082,6 +5213,10 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, drm_connector_attach_vrr_capable_property( &aconnector->base); +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (adev->asic_type >= CHIP_RAVEN) + drm_connector_attach_content_protection_property(&aconnector->base, false); +#endif } } @@ -5324,6 +5459,53 @@ is_scaling_state_different(const struct dm_connector_state *dm_state, return false; } +#ifdef CONFIG_DRM_AMD_DC_HDCP +static bool is_content_protection_different(struct drm_connector_state *state, + const struct drm_connector_state *old_state, + const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + /* CP is being re enabled, ignore this */ + if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && + state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + return false; + } + + /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED */ + if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && + state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + + /* Check if something is connected/enabled, otherwise we start hdcp but nothing is connected/enabled + * hot-plug, headless s3, dpms + */ + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && connector->dpms == DRM_MODE_DPMS_ON && + aconnector->dc_sink != NULL) + return true; + + if (old_state->content_protection == state->content_protection) + return false; + + if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + return true; + + return false; +} + +static void update_content_protection(struct drm_connector_state *state, const struct drm_connector *connector, + struct hdcp_workqueue *hdcp_w) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + if (state->content_protection 
== DRM_MODE_CONTENT_PROTECTION_DESIRED) + hdcp_add_display(hdcp_w, aconnector->dc_link->link_index, aconnector); + else if (state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED) + hdcp_remove_display(hdcp_w, aconnector->dc_link->link_index, aconnector->base.index); + +} +#endif static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, struct dc_stream_state *stream) @@ -5665,6 +5847,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, uint32_t target_vblank, last_flip_vblank; bool vrr_active = amdgpu_dm_vrr_active(acrtc_state); bool pflip_present = false; + bool swizzle = true; struct { struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_plane_info plane_infos[MAX_SURFACES]; @@ -5710,6 +5893,9 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, dc_plane = dm_new_plane_state->dc_state; + if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle) + swizzle = false; + bundle->surface_updates[planes_count].surface = dc_plane; if (new_pcrtc_state->color_mgmt_changed) { bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; @@ -5864,6 +6050,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, /* Update the planes if changed or disable if we don't have any. */ if ((planes_count || acrtc_state->active_planes == 0) && acrtc_state->stream) { + bundle->stream_update.stream = acrtc_state->stream; if (new_pcrtc_state->mode_changed) { bundle->stream_update.src = acrtc_state->stream->src; bundle->stream_update.dst = acrtc_state->stream->dst; @@ -5899,14 +6086,29 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, &acrtc_state->vrr_params.adjust); spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); } - mutex_lock(&dm->dc_lock); + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream); + dc_commit_updates_for_stream(dm->dc, bundle->surface_updates, planes_count, acrtc_state->stream, &bundle->stream_update, dc_state); + + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->psr_version && + !acrtc_state->stream->link->psr_feature_enabled) + amdgpu_dm_link_setup_psr(acrtc_state->stream); + else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_feature_enabled && + !acrtc_state->stream->link->psr_allow_active && + swizzle) { + amdgpu_dm_psr_enable(acrtc_state->stream); + } + mutex_unlock(&dm->dc_lock); } @@ -6215,10 +6417,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) crtc->hwmode = new_crtc_state->mode; } else if (modereset_required(new_crtc_state)) { DRM_DEBUG_DRIVER("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc); - /* i.e. 
reset mode */ - if (dm_old_crtc_state->stream) + if (dm_old_crtc_state->stream) { + if (dm_old_crtc_state->stream->link->psr_allow_active) + amdgpu_dm_psr_disable(dm_old_crtc_state->stream); + remove_stream(adev, acrtc, dm_old_crtc_state->stream); + } } } /* for_each_crtc_in_state() */ @@ -6248,6 +6453,30 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) acrtc->otg_inst = status->primary_otg_inst; } } +#ifdef CONFIG_DRM_AMD_DC_HDCP + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + new_crtc_state = NULL; + + if (acrtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && + connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); + new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + continue; + } + + if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) + update_content_protection(new_con_state, connector, adev->dm.hdcp_workqueue); + } +#endif /* Handle connector state changes */ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { @@ -6287,9 +6516,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) if (!scaling_changed && !abm_changed && !hdr_changed) continue; + stream_update.stream = dm_new_crtc_state->stream; if (scaling_changed) { update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode, - dm_new_con_state, (struct dc_stream_state *)dm_new_crtc_state->stream); + dm_new_con_state, dm_new_crtc_state->stream); stream_update.src = dm_new_crtc_state->stream->src; stream_update.dst = dm_new_crtc_state->stream->dst; @@ -7158,7 +7388,7 @@ dm_determine_update_type_for_commit(struct amdgpu_display_manager *dm, status = dc_stream_get_status_from_state(old_dm_state->context, new_dm_crtc_state->stream); - + stream_update.stream = new_dm_crtc_state->stream; /* * TODO: DC modifies the surface during this call so we need * to lock here - find a way to do this without locking. @@ -7569,3 +7799,92 @@ update: freesync_capable); } +static void amdgpu_dm_set_psr_caps(struct dc_link *link) +{ + uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE]; + + if (!(link->connector_signal & SIGNAL_TYPE_EDP)) + return; + if (link->type == dc_connection_none) + return; + if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT, + dpcd_data, sizeof(dpcd_data))) { + link->psr_feature_enabled = dpcd_data[0] ? 
true:false; + DRM_INFO("PSR support:%d\n", link->psr_feature_enabled); + } +} + +/* + * amdgpu_dm_link_setup_psr() - configure psr link + * @stream: stream state + * + * Return: true if success + */ +static bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) +{ + struct dc_link *link = NULL; + struct psr_config psr_config = {0}; + struct psr_context psr_context = {0}; + struct dc *dc = NULL; + bool ret = false; + + if (stream == NULL) + return false; + + link = stream->link; + dc = link->ctx->dc; + + psr_config.psr_version = dc->res_pool->dmcu->dmcu_version.psr_version; + + if (psr_config.psr_version > 0) { + psr_config.psr_exit_link_training_required = 0x1; + psr_config.psr_frame_capture_indication_req = 0; + psr_config.psr_rfb_setup_time = 0x37; + psr_config.psr_sdp_transmit_line_num_deadline = 0x20; + psr_config.allow_smu_optimizations = 0x0; + + ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); + + } + DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_feature_enabled); + + return ret; +} + +/* + * amdgpu_dm_psr_enable() - enable psr f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) +{ + struct dc_link *link = stream->link; + struct dc_static_screen_events triggers = {0}; + + DRM_DEBUG_DRIVER("Enabling psr...\n"); + + triggers.cursor_update = true; + triggers.overlay_update = true; + triggers.surface_update = true; + + dc_stream_set_static_screen_events(link->ctx->dc, + &stream, 1, + &triggers); + + return dc_link_set_psr_allow_active(link, true, false); +} + +/* + * amdgpu_dm_psr_disable() - disable psr f/w + * @stream: stream state + * + * Return: true if success + */ +static bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) +{ + + DRM_DEBUG_DRIVER("Disabling psr...\n"); + + return dc_link_set_psr_allow_active(stream->link, false, true); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index c8c525a2b505..77c5166e6b08 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -108,6 +108,12 @@ struct amdgpu_dm_backlight_caps { * @display_indexes_num: Max number of display streams supported * @irq_handler_list_table_lock: Synchronizes access to IRQ tables * @backlight_dev: Backlight control device + * @backlight_link: Link on which to control backlight + * @backlight_caps: Capabilities of the backlight device + * @freesync_module: Module handling freesync calculations + * @fw_dmcu: Reference to DMCU firmware + * @dmcu_fw_version: Version of the DMCU firmware + * @soc_bounding_box: SOC bounding box values provided by gpu_info FW * @cached_state: Caches device atomic state for suspend/resume * @compressor: Frame buffer compression buffer. 
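Taken together, the PSR helpers added above are driven from two places: capability probing when a link is detected (behind the DC_PSR_MASK feature flag) and the enable/disable decisions in the plane-commit path. A simplified, hedged sketch of that ordering, using the fields referenced in the hunks above rather than the literal driver code:

/* Rough PSR sequencing implied by the commit-path hunks; not verbatim. */
static void example_psr_sequencing(struct dc_link *link,
				   struct dc_stream_state *stream,
				   bool fast_update)
{
	/* Probed once at detect time: reads DP_PSR_SUPPORT from DPCD. */
	amdgpu_dm_set_psr_caps(link);

	/* Heavy (non-fast) updates first drop out of PSR... */
	if (!fast_update && link->psr_allow_active)
		amdgpu_dm_psr_disable(stream);

	/* ... dc_commit_updates_for_stream() runs here ... */

	/* ... then (re)program the PSR link if the sink supports it, */
	if (!fast_update && stream->psr_version && !link->psr_feature_enabled)
		amdgpu_dm_link_setup_psr(stream);
	/* while fast updates with PSR ready simply re-allow PSR entry. */
	else if (fast_update && link->psr_feature_enabled &&
		 !link->psr_allow_active)
		amdgpu_dm_psr_enable(stream);
}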
See &struct dm_comressor_info */ @@ -128,7 +134,7 @@ struct amdgpu_display_manager { u16 display_indexes_num; /** - * @atomic_obj + * @atomic_obj: * * In combination with &dm_atomic_state it helps manage * global atomic state that doesn't map cleanly into existing @@ -225,6 +231,9 @@ struct amdgpu_display_manager { struct amdgpu_dm_backlight_caps backlight_caps; struct mod_freesync *freesync_module; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct hdcp_workqueue *hdcp_workqueue; +#endif struct drm_atomic_state *cached_state; @@ -234,6 +243,8 @@ struct amdgpu_display_manager { uint32_t dmcu_fw_version; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 /** + * @soc_bounding_box: + * * gpu_info FW provided soc bounding box struct or 0 if not * available in FW */ @@ -287,6 +298,7 @@ struct amdgpu_dm_connector { uint32_t debugfs_dpcd_address; uint32_t debugfs_dpcd_size; #endif + bool force_yuv420_output; }; #define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index b43bb7f90e4e..2233d293a707 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -210,6 +210,8 @@ static int __set_legacy_tf(struct dc_transfer_func *func, res = mod_color_calculate_regamma_params(func, gamma, true, has_rom, NULL); + dc_gamma_release(&gamma); + return res ? 0 : -ENOMEM; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c index a549c7c717dd..eaad9099bc0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c @@ -122,11 +122,16 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc, } /* Configure dithering */ - if (!dm_need_crc_dither(source)) + if (!dm_need_crc_dither(source)) { dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8); - else + dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, + DYN_EXPANSION_DISABLE); + } else { dc_stream_set_dither_option(stream_state, DITHER_OPTION_DEFAULT); + dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, + DYN_EXPANSION_AUTO); + } unlock: mutex_unlock(&adev->dm.dc_lock); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index f3dfb2887ae0..bdb37e611015 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -942,6 +942,52 @@ static const struct { {"aux_dpcd_data", &dp_dpcd_data_debugfs_fops} }; +/* + * Force YUV420 output if available from the given mode + */ +static int force_yuv420_output_set(void *data, u64 val) +{ + struct amdgpu_dm_connector *connector = data; + + connector->force_yuv420_output = (bool)val; + + return 0; +} + +/* + * Check if YUV420 is forced when available from the given mode + */ +static int force_yuv420_output_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + + *val = connector->force_yuv420_output; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get, + force_yuv420_output_set, "%llu\n"); + +/* + * Read PSR state + */ +static int psr_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + uint32_t psr_state = 0; + + dc_link_get_psr_state(link, &psr_state); + + 
*val = psr_state; + + return 0; +} + + +DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n"); + void connector_debugfs_init(struct amdgpu_dm_connector *connector) { int i; @@ -955,6 +1001,12 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector) dp_debugfs_entries[i].fops); } } + if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) + debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops); + + debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector, + &force_yuv420_output_fops); + } /* diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c new file mode 100644 index 000000000000..77181ddf6c8e --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -0,0 +1,346 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "amdgpu_dm_hdcp.h" +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "dm_helpers.h" +#include <drm/drm_hdcp.h> + +static bool +lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) +{ + + struct dc_link *link = handle; + struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} }; + struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + + return dm_helpers_submit_i2c(link->ctx, link, &cmd); +} + +static bool +lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, {false, address, size, data} }; + struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, link->dc->caps.i2c_speed_in_khz}; + + return dm_helpers_submit_i2c(link->ctx, link, &cmd); +} + +static bool +lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size); +} + +static bool +lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size); +} + +static void process_output(struct hdcp_workqueue *hdcp_work) +{ + struct mod_hdcp_output output = hdcp_work->output; + + if (output.callback_stop) + cancel_delayed_work(&hdcp_work->callback_dwork); + + if (output.callback_needed) + schedule_delayed_work(&hdcp_work->callback_dwork, + msecs_to_jiffies(output.callback_delay)); + + if (output.watchdog_timer_stop) + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + + if (output.watchdog_timer_needed) + schedule_delayed_work(&hdcp_work->watchdog_timer_dwork, + msecs_to_jiffies(output.watchdog_timer_delay)); + +} + +void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, struct amdgpu_dm_connector *aconnector) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + struct mod_hdcp_display *display = &hdcp_work[link_index].display; + struct mod_hdcp_link *link = &hdcp_work[link_index].link; + + mutex_lock(&hdcp_w->mutex); + hdcp_w->aconnector = aconnector; + + mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output); + + schedule_delayed_work(&hdcp_w->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); + +} + +void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, unsigned int display_index) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + mutex_lock(&hdcp_w->mutex); + + mod_hdcp_remove_display(&hdcp_w->hdcp, display_index, &hdcp_w->output); + + cancel_delayed_work(&hdcp_w->property_validate_dwork); + hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); + +} + +void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + mutex_lock(&hdcp_w->mutex); + + mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); + + cancel_delayed_work(&hdcp_w->property_validate_dwork); + hdcp_w->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); +} + +void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +{ + 
struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + schedule_work(&hdcp_w->cpirq_work); +} + + + + +static void event_callback(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, + callback_dwork); + + mutex_lock(&hdcp_work->mutex); + + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK, + &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + + +} +static void event_property_update(struct work_struct *work) +{ + + struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, property_update_work); + struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + struct drm_device *dev = hdcp_work->aconnector->base.dev; + long ret; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + mutex_lock(&hdcp_work->mutex); + + + if (aconnector->base.state->commit) { + ret = wait_for_completion_interruptible_timeout(&aconnector->base.state->commit->hw_done, 10 * HZ); + + if (ret == 0) { + DRM_ERROR("HDCP state unknown! Setting it to DESIRED"); + hdcp_work->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } + } + + if (hdcp_work->encryption_status == MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON) + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_ENABLED); + else + drm_hdcp_update_content_protection(&aconnector->base, DRM_MODE_CONTENT_PROTECTION_DESIRED); + + + mutex_unlock(&hdcp_work->mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); +} + +static void event_property_validate(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work = + container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork); + struct mod_hdcp_display_query query; + struct amdgpu_dm_connector *aconnector = hdcp_work->aconnector; + + mutex_lock(&hdcp_work->mutex); + + query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, &query); + + if (query.encryption_status != hdcp_work->encryption_status) { + hdcp_work->encryption_status = query.encryption_status; + schedule_work(&hdcp_work->property_update_work); + } + + schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + + mutex_unlock(&hdcp_work->mutex); +} + +static void event_watchdog_timer(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(to_delayed_work(work), + struct hdcp_workqueue, + watchdog_timer_dwork); + + mutex_lock(&hdcp_work->mutex); + + mod_hdcp_process_event(&hdcp_work->hdcp, + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT, + &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + +} + +static void event_cpirq(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work); + + mutex_lock(&hdcp_work->mutex); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); + +} + + +void hdcp_destroy(struct hdcp_workqueue *hdcp_work) +{ + int i = 0; + + for (i = 0; i < hdcp_work->max_link; i++) { + cancel_delayed_work_sync(&hdcp_work[i].callback_dwork); + cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); + } + + kfree(hdcp_work); + +} + +static void update_config(void *handle, struct cp_psp_stream_config 
*config) +{ + struct hdcp_workqueue *hdcp_work = handle; + struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; + int link_index = aconnector->dc_link->link_index; + struct mod_hdcp_display *display = &hdcp_work[link_index].display; + struct mod_hdcp_link *link = &hdcp_work[link_index].link; + + memset(display, 0, sizeof(*display)); + memset(link, 0, sizeof(*link)); + + display->index = aconnector->base.index; + display->state = MOD_HDCP_DISPLAY_ACTIVE; + + if (aconnector->dc_sink != NULL) + link->mode = mod_hdcp_signal_type_to_operation_mode(aconnector->dc_sink->sink_signal); + + display->controller = CONTROLLER_ID_D0 + config->otg_inst; + display->dig_fe = config->stream_enc_inst; + link->dig_be = config->link_enc_inst; + link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; + link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; + link->adjust.hdcp2.disable = 1; + +} + +struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc) +{ + + int max_caps = dc->caps.max_links; + struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL); + int i = 0; + + if (hdcp_work == NULL) + goto fail_alloc_context; + + hdcp_work->max_link = max_caps; + + for (i = 0; i < max_caps; i++) { + + mutex_init(&hdcp_work[i].mutex); + + INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq); + INIT_WORK(&hdcp_work[i].property_update_work, event_property_update); + INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback); + INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer); + INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); + + hdcp_work[i].hdcp.config.psp.handle = psp_context; + hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); + hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; + hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; + hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; + hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; + } + + cp_psp->funcs.update_stream_config = update_config; + cp_psp->handle = hdcp_work; + + return hdcp_work; + +fail_alloc_context: + kfree(hdcp_work); + + return NULL; + + + +} + + + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h new file mode 100644 index 000000000000..d3ba505d0696 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -0,0 +1,66 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef AMDGPU_DM_AMDGPU_DM_HDCP_H_ +#define AMDGPU_DM_AMDGPU_DM_HDCP_H_ + +#include "mod_hdcp.h" +#include "hdcp.h" +#include "dc.h" +#include "dm_cp_psp.h" + +struct mod_hdcp; +struct mod_hdcp_link; +struct mod_hdcp_display; +struct cp_psp; + +struct hdcp_workqueue { + struct work_struct cpirq_work; + struct work_struct property_update_work; + struct delayed_work callback_dwork; + struct delayed_work watchdog_timer_dwork; + struct delayed_work property_validate_dwork; + struct amdgpu_dm_connector *aconnector; + struct mutex mutex; + + struct mod_hdcp hdcp; + struct mod_hdcp_output output; + struct mod_hdcp_display display; + struct mod_hdcp_link link; + + enum mod_hdcp_encryption_status encryption_status; + uint8_t max_link; +}; + +void hdcp_add_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index, + struct amdgpu_dm_connector *aconnector); +void hdcp_remove_display(struct hdcp_workqueue *work, unsigned int link_index, unsigned int display_index); +void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index); +void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index); +void hdcp_destroy(struct hdcp_workqueue *work); + +struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc); + +#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index ee1dc75f5ddc..11e5784aa62a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -97,11 +97,10 @@ enum dc_edid_status dm_helpers_parse_edid_caps( (struct edid *) edid->raw_edid); sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads); - if (sad_count <= 0) { - DRM_INFO("SADs count is: %d, don't need to read it\n", - sad_count); + if (sad_count < 0) + DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return result; - } edid_caps->audio_mode_count = sad_count < DC_MAX_AUDIO_DESC_COUNT ? sad_count : DC_MAX_AUDIO_DESC_COUNT; for (i = 0; i < edid_caps->audio_mode_count; ++i) { @@ -282,7 +281,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table( * Polls for ACT (allocation change trigger) handled and sends * ALLOCATE_PAYLOAD message. 
*/ -bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( +enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( struct dc_context *ctx, const struct dc_stream_state *stream) { @@ -293,19 +292,19 @@ bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; if (!aconnector || !aconnector->mst_port) - return false; + return ACT_FAILED; mst_mgr = &aconnector->mst_port->mst_mgr; if (!mst_mgr->mst_state) - return false; + return ACT_FAILED; ret = drm_dp_check_act_status(mst_mgr); if (ret) - return false; + return ACT_FAILED; - return true; + return ACT_SUCCESS; } bool dm_helpers_dp_mst_send_payload_allocation( diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c index fa5d503d379c..64445c4cc4c2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -732,8 +732,10 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); @@ -751,6 +753,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev) true); } } + drm_connector_list_iter_end(&iter); } /** @@ -765,8 +768,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) { struct drm_device *dev = adev->ddev; struct drm_connector *connector; + struct drm_connector_list_iter iter; - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; @@ -779,4 +784,5 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev) false); } } + drm_connector_list_iter_end(&iter); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 16218a202b59..2bf8534c18fb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -36,7 +36,9 @@ #include "dc_link_ddc.h" #include "i2caux_interface.h" - +#if defined(CONFIG_DEBUG_FS) +#include "amdgpu_dm_debugfs.h" +#endif /* #define TRACE_DPCD */ #ifdef TRACE_DPCD @@ -113,6 +115,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, result = -EIO; break; case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY: + case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE: result = -EBUSY; break; case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT: @@ -123,31 +126,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux, return result; } -static enum drm_connector_status -dm_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); - struct amdgpu_dm_connector *master = aconnector->mst_port; - - enum drm_connector_status status = - drm_dp_mst_detect_port( - connector, - &master->mst_mgr, - aconnector->port); - - return status; -} - static void dm_dp_mst_connector_destroy(struct drm_connector *connector) { struct amdgpu_dm_connector *amdgpu_dm_connector = 
to_amdgpu_dm_connector(connector); struct amdgpu_encoder *amdgpu_encoder = amdgpu_dm_connector->mst_encoder; - if (amdgpu_dm_connector->edid) { - kfree(amdgpu_dm_connector->edid); - amdgpu_dm_connector->edid = NULL; - } + kfree(amdgpu_dm_connector->edid); + amdgpu_dm_connector->edid = NULL; drm_encoder_cleanup(&amdgpu_encoder->base); kfree(amdgpu_encoder); @@ -163,6 +149,12 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector) to_amdgpu_dm_connector(connector); struct drm_dp_mst_port *port = amdgpu_dm_connector->port; +#if defined(CONFIG_DEBUG_FS) + connector_debugfs_init(amdgpu_dm_connector); + amdgpu_dm_connector->debugfs_dpcd_address = 0; + amdgpu_dm_connector->debugfs_dpcd_size = 0; +#endif + return drm_dp_mst_connector_late_register(connector, port); } @@ -177,7 +169,6 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector) } static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { - .detect = dm_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = dm_dp_mst_connector_destroy, .reset = amdgpu_dm_connector_funcs_reset, @@ -245,17 +236,29 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) return ret; } -static struct drm_encoder *dm_mst_best_encoder(struct drm_connector *connector) +static struct drm_encoder * +dm_mst_atomic_best_encoder(struct drm_connector *connector, + struct drm_connector_state *connector_state) { - struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + return &to_amdgpu_dm_connector(connector)->mst_encoder->base; +} + +static int +dm_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_dm_connector *master = aconnector->mst_port; - return &amdgpu_dm_connector->mst_encoder->base; + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, + aconnector->port); } static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = { .get_modes = dm_dp_mst_get_modes, .mode_valid = amdgpu_dm_connector_mode_valid, - .best_encoder = dm_mst_best_encoder, + .atomic_best_encoder = dm_mst_atomic_best_encoder, + .detect_ctx = dm_dp_mst_detect, }; static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) @@ -416,7 +419,11 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, drm_dp_aux_register(&aconnector->dm_dp_aux.aux); drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, - aconnector->base.name, dm->adev->dev); + &aconnector->base); + + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) + return; + aconnector->mst_mgr.cbs = &dm_mst_cbs; drm_dp_mst_topology_mgr_init( &aconnector->mst_mgr, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index f4cfa0caeba8..55a520a63712 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -345,7 +345,7 @@ bool dm_pp_get_clock_levels_by_type( /* Error in pplib. Provide default values. 
*/ return true; } - } else if (adev->smu.funcs && adev->smu.funcs->get_clock_by_type) { + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_clock_by_type) { if (smu_get_clock_by_type(&adev->smu, dc_to_pp_clock_type(clk_type), &pp_clks)) { @@ -365,7 +365,7 @@ bool dm_pp_get_clock_levels_by_type( validation_clks.memory_max_clock = 80000; validation_clks.level = 0; } - } else if (adev->smu.funcs && adev->smu.funcs->get_max_high_clocks) { + } else if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->get_max_high_clocks) { if (smu_get_max_high_clocks(&adev->smu, &validation_clks)) { DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); validation_clks.engine_max_clock = 72000; @@ -506,8 +506,8 @@ bool dm_pp_apply_clock_for_voltage_request( ret = adev->powerplay.pp_funcs->display_clock_voltage_request( adev->powerplay.pp_handle, &pp_clock_request); - else if (adev->smu.funcs && - adev->smu.funcs->display_clock_voltage_request) + else if (adev->smu.ppt_funcs && + adev->smu.ppt_funcs->display_clock_voltage_request) ret = smu_display_clock_voltage_request(&adev->smu, &pp_clock_request); if (ret) @@ -527,7 +527,7 @@ bool dm_pp_get_static_clocks( ret = adev->powerplay.pp_funcs->get_current_clocks( adev->powerplay.pp_handle, &pp_clk_info); - else if (adev->smu.funcs) + else if (adev->smu.ppt_funcs) ret = smu_get_current_clocks(&adev->smu, &pp_clk_info); if (ret) return false; @@ -589,10 +589,9 @@ void pp_rv_set_wm_ranges(struct pp_smu *pp, if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, &wm_with_clock_ranges); - else if (adev->smu.funcs && - adev->smu.funcs->set_watermarks_for_clock_ranges) + else smu_set_watermarks_for_clock_ranges(&adev->smu, - &wm_with_clock_ranges); + &wm_with_clock_ranges); } void pp_rv_set_pme_wa_enable(struct pp_smu *pp) @@ -604,7 +603,7 @@ void pp_rv_set_pme_wa_enable(struct pp_smu *pp) if (pp_funcs && pp_funcs->notify_smu_enable_pwe) pp_funcs->notify_smu_enable_pwe(pp_handle); - else if (adev->smu.funcs) + else if (adev->smu.ppt_funcs) smu_notify_smu_enable_pwe(&adev->smu); } @@ -665,7 +664,6 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - struct smu_context *smu = &adev->smu; struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges; @@ -708,15 +706,7 @@ enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; } - if (!smu->funcs) - return PP_SMU_RESULT_UNSUPPORTED; - - /* 0: successful or smu.funcs->set_watermarks_for_clock_ranges = NULL; - * 1: fail - */ - if (smu_set_watermarks_for_clock_ranges(&adev->smu, - &wm_with_clock_ranges)) - return PP_SMU_RESULT_UNSUPPORTED; + smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges); return PP_SMU_RESULT_OK; } @@ -727,10 +717,10 @@ enum pp_smu_status pp_nv_set_pme_wa_enable(struct pp_smu *pp) struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - /* 0: successful or smu.funcs->set_azalia_d3_pme = NULL; 1: fail */ + /* 0: successful or smu.ppt_funcs->set_azalia_d3_pme = NULL; 1: fail */ if (smu_set_azalia_d3_pme(smu)) return PP_SMU_RESULT_FAIL; @@ -743,10 +733,10 @@ enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count) struct 
amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - /* 0: successful or smu.funcs->set_display_count = NULL; 1: fail */ + /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */ if (smu_set_display_count(smu, count)) return PP_SMU_RESULT_FAIL; @@ -759,10 +749,10 @@ enum pp_smu_status pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz) struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - /* 0: successful or smu.funcs->set_deep_sleep_dcefclk = NULL;1: fail */ + /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */ if (smu_set_deep_sleep_dcefclk(smu, mhz)) return PP_SMU_RESULT_FAIL; @@ -777,13 +767,13 @@ enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq( struct smu_context *smu = &adev->smu; struct pp_display_clock_request clock_req; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = mhz * 1000; - /* 0: successful or smu.funcs->display_clock_voltage_request = NULL + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ if (smu_display_clock_voltage_request(smu, &clock_req)) @@ -799,13 +789,13 @@ enum pp_smu_status pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) struct smu_context *smu = &adev->smu; struct pp_display_clock_request clock_req; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; clock_req.clock_type = amd_pp_mem_clock; clock_req.clock_freq_in_khz = mhz * 1000; - /* 0: successful or smu.funcs->display_clock_voltage_request = NULL + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ if (smu_display_clock_voltage_request(smu, &clock_req)) @@ -835,7 +825,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, struct smu_context *smu = &adev->smu; struct pp_display_clock_request clock_req; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; switch (clock_id) { @@ -853,7 +843,7 @@ enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, } clock_req.clock_freq_in_khz = mhz * 1000; - /* 0: successful or smu.funcs->display_clock_voltage_request = NULL + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ if (smu_display_clock_voltage_request(smu, &clock_req)) @@ -869,13 +859,13 @@ enum pp_smu_status pp_nv_get_maximum_sustainable_clocks( struct amdgpu_device *adev = ctx->driver_context; struct smu_context *smu = &adev->smu; - if (!smu->funcs) + if (!smu->ppt_funcs) return PP_SMU_RESULT_UNSUPPORTED; - if (!smu->funcs->get_max_sustainable_clocks_by_dc) + if (!smu->ppt_funcs->get_max_sustainable_clocks_by_dc) return PP_SMU_RESULT_UNSUPPORTED; - if (!smu->funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks)) + if (!smu_get_max_sustainable_clocks_by_dc(smu, max_clocks)) return PP_SMU_RESULT_OK; return PP_SMU_RESULT_FAIL; @@ -894,13 +884,97 @@ enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, if (!smu->ppt_funcs->get_uclk_dpm_states) return PP_SMU_RESULT_UNSUPPORTED; - if (!smu->ppt_funcs->get_uclk_dpm_states(smu, + if (!smu_get_uclk_dpm_states(smu, clock_values_in_khz, num_states)) return PP_SMU_RESULT_OK; return PP_SMU_RESULT_FAIL; } +#ifdef CONFIG_DRM_AMD_DC_DCN2_1 +enum pp_smu_status pp_rn_get_dpm_clock_table( + struct pp_smu *pp, struct 
dpm_clocks *clock_table) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu->ppt_funcs->get_dpm_clock_table) + return PP_SMU_RESULT_UNSUPPORTED; + + if (!smu_get_dpm_clock_table(smu, clock_table)) + return PP_SMU_RESULT_OK; + + return PP_SMU_RESULT_FAIL; +} + +enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, + struct pp_smu_wm_range_sets *ranges) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct smu_context *smu = &adev->smu; + struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; + struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = + wm_with_clock_ranges.wm_dmif_clocks_ranges; + struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = + wm_with_clock_ranges.wm_mcif_clocks_ranges; + int32_t i; + + if (!smu->ppt_funcs) + return PP_SMU_RESULT_UNSUPPORTED; + + wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets; + wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets; + + for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) { + if (ranges->reader_wm_sets[i].wm_inst > 3) + wm_dce_clocks[i].wm_set_id = WM_SET_A; + else + wm_dce_clocks[i].wm_set_id = + ranges->reader_wm_sets[i].wm_inst; + + wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = + ranges->reader_wm_sets[i].min_drain_clk_mhz; + + wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = + ranges->reader_wm_sets[i].max_drain_clk_mhz; + + wm_dce_clocks[i].wm_min_mem_clk_in_khz = + ranges->reader_wm_sets[i].min_fill_clk_mhz; + + wm_dce_clocks[i].wm_max_mem_clk_in_khz = + ranges->reader_wm_sets[i].max_fill_clk_mhz; + } + + for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { + if (ranges->writer_wm_sets[i].wm_inst > 3) + wm_soc_clocks[i].wm_set_id = WM_SET_A; + else + wm_soc_clocks[i].wm_set_id = + ranges->writer_wm_sets[i].wm_inst; + wm_soc_clocks[i].wm_min_socclk_clk_in_khz = + ranges->writer_wm_sets[i].min_fill_clk_mhz; + + wm_soc_clocks[i].wm_max_socclk_clk_in_khz = + ranges->writer_wm_sets[i].max_fill_clk_mhz; + + wm_soc_clocks[i].wm_min_mem_clk_in_khz = + ranges->writer_wm_sets[i].min_drain_clk_mhz; + + wm_soc_clocks[i].wm_max_mem_clk_in_khz = + ranges->writer_wm_sets[i].max_drain_clk_mhz; + } + + smu_set_watermarks_for_clock_ranges(&adev->smu, &wm_with_clock_ranges); + + return PP_SMU_RESULT_OK; +} +#endif + void dm_pp_get_funcs( struct dc_context *ctx, struct pp_smu_funcs *funcs) @@ -945,6 +1019,15 @@ void dm_pp_get_funcs( funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support; break; #endif + +#ifdef CONFIG_DRM_AMD_DC_DCN2_1 + case DCN_VERSION_2_1: + funcs->ctx.ver = PP_SMU_VER_RN; + funcs->rn_funcs.pp_smu.dm = ctx; + funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges; + funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table; + break; +#endif default: DRM_ERROR("smu version is not supported !\n"); break; diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index 627982cb15d2..a160512a2f04 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -48,6 +48,10 @@ DC_LIBS += dce110 DC_LIBS += dce100 DC_LIBS += dce80 +ifdef CONFIG_DRM_AMD_DC_HDCP +DC_LIBS += hdcp +endif + AMD_DC = $(addsuffix /Makefile, $(addprefix $(FULL_AMD_DISPLAY_PATH)/dc/,$(DC_LIBS))) include $(AMD_DC) diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c 
b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c index 221e0f56389f..823843cd2613 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c @@ -2543,7 +2543,6 @@ static enum bp_result construct_integrated_info( /* Sort voltage table from low to high*/ if (result == BP_RESULT_OK) { - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -2553,10 +2552,8 @@ static enum bp_result construct_integrated_info( info->disp_clk_voltage[j].max_supported_clk < info->disp_clk_voltage[j-1].max_supported_clk) { /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; + swap(info->disp_clk_voltage[j - 1], + info->disp_clk_voltage[j]); } } } diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index dff65c0fe82f..7873abea4112 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1613,8 +1613,6 @@ static enum bp_result construct_integrated_info( struct atom_common_table_header *header; struct atom_data_revision revision; - - struct clock_voltage_caps temp = {0, 0}; uint32_t i; uint32_t j; @@ -1644,10 +1642,8 @@ static enum bp_result construct_integrated_info( info->disp_clk_voltage[j-1].max_supported_clk ) { /* swap j and j - 1*/ - temp = info->disp_clk_voltage[j-1]; - info->disp_clk_voltage[j-1] = - info->disp_clk_voltage[j]; - info->disp_clk_voltage[j] = temp; + swap(info->disp_clk_voltage[j - 1], + info->disp_clk_voltage[j]); } } } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c index c43797bea413..8828dd9c3783 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c @@ -65,6 +65,31 @@ int clk_mgr_helper_get_active_display_cnt( return display_count; } +void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) +{ + struct dc_link *edp_link = get_edp_link(dc); + + if (dc->hwss.exit_optimized_pwr_state) + dc->hwss.exit_optimized_pwr_state(dc, dc->current_state); + + if (edp_link) { + clk_mgr->psr_allow_active_cache = edp_link->psr_allow_active; + dc_link_set_psr_allow_active(edp_link, false, false); + } + +} + +void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr) +{ + struct dc_link *edp_link = get_edp_link(dc); + + if (edp_link) + dc_link_set_psr_allow_active(edp_link, clk_mgr->psr_allow_active_cache, false); + + if (dc->hwss.optimize_pwr_state) + dc->hwss.optimize_pwr_state(dc, dc->current_state); + +} struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c index c5c8c4901eed..26db1c5d4e4d 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce100/dce_clk_mgr.c @@ -147,7 +147,7 @@ int dce_get_dp_ref_freq_khz(struct clk_mgr *clk_mgr_base) /* Calculate the current DFS clock, in kHz.*/ dp_ref_clk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz) / target_div; + * clk_mgr->base.dentist_vco_freq_khz) / target_div; return dce_adjust_dp_ref_freq_for_ss(clk_mgr, dp_ref_clk_khz); } @@ -239,7 +239,7 @@ int dce_set_clock( /* Make sure requested clock isn't lower 
than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, - clk_mgr_dce->dentist_vco_freq_khz / 64); + clk_mgr_dce->base.dentist_vco_freq_khz / 64); /* Prepare to program display clock*/ pxl_clk_params.target_pixel_clock_100hz = requested_clk_khz * 10; @@ -276,11 +276,11 @@ static void dce_clock_read_integrated_info(struct clk_mgr_internal *clk_mgr_dce) int i; if (bp->integrated_info) - clk_mgr_dce->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; - if (clk_mgr_dce->dentist_vco_freq_khz == 0) { - clk_mgr_dce->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; - if (clk_mgr_dce->dentist_vco_freq_khz == 0) - clk_mgr_dce->dentist_vco_freq_khz = 3600000; + clk_mgr_dce->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; + if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) { + clk_mgr_dce->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; + if (clk_mgr_dce->base.dentist_vco_freq_khz == 0) + clk_mgr_dce->base.dentist_vco_freq_khz = 3600000; } /*update the maximum display clock for each power state*/ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c index 7c746ef1e32e..a6c46e903ff9 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dce112/dce112_clk_mgr.c @@ -81,7 +81,7 @@ int dce112_set_clock(struct clk_mgr *clk_mgr_base, int requested_clk_khz) /* Make sure requested clock isn't lower than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, - clk_mgr_dce->dentist_vco_freq_khz / 62); + clk_mgr_dce->base.dentist_vco_freq_khz / 62); dce_clk_params.target_clock_frequency = requested_clk_khz; dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; @@ -135,7 +135,7 @@ int dce112_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_clk_khz) /* Make sure requested clock isn't lower than minimum threshold*/ if (requested_clk_khz > 0) requested_clk_khz = max(requested_clk_khz, - clk_mgr->dentist_vco_freq_khz / 62); + clk_mgr->base.dentist_vco_freq_khz / 62); dce_clk_params.target_clock_frequency = requested_clk_khz; dce_clk_params.pll_id = CLOCK_SOURCE_ID_DFS; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c index 47f529ce280a..3fab9296918a 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn10/rv1_clk_mgr.c @@ -139,6 +139,9 @@ static void rv1_update_clocks(struct clk_mgr *clk_mgr_base, ASSERT(clk_mgr->pp_smu); + if (dc->work_arounds.skip_clock_update) + return; + pp_smu = &clk_mgr->pp_smu->rv_funcs; display_count = clk_mgr_helper_get_active_display_cnt(dc, context); @@ -266,11 +269,11 @@ void rv1_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_ clk_mgr->base.dprefclk_khz = 600000; if (bp->integrated_info) - clk_mgr->dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; - if (bp->fw_info_valid && clk_mgr->dentist_vco_freq_khz == 0) { - clk_mgr->dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; - if (clk_mgr->dentist_vco_freq_khz == 0) - clk_mgr->dentist_vco_freq_khz = 3600000; + clk_mgr->base.dentist_vco_freq_khz = bp->integrated_info->dentist_vco_freq; + if (bp->fw_info_valid && clk_mgr->base.dentist_vco_freq_khz == 0) { + clk_mgr->base.dentist_vco_freq_khz = bp->fw_info.smu_gpu_pll_output_freq; + if (clk_mgr->base.dentist_vco_freq_khz == 
0) + clk_mgr->base.dentist_vco_freq_khz = 3600000; } if (!debug->disable_dfs_bypass && bp->integrated_info) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 3e8ac303bd52..25d7b7c6681c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -104,84 +104,39 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, { int i; + clk_mgr->dccg->ref_dppclk = clk_mgr->base.clks.dppclk_khz; for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { int dpp_inst, dppclk_khz; - if (!context->res_ctx.pipe_ctx[i].plane_state) - continue; - - dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; + /* Loop index will match dpp->inst if resource exists, + * and we want to avoid dependency on dpp object + */ + dpp_inst = i; dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + clk_mgr->dccg->funcs->update_dpp_dto( - clk_mgr->dccg, dpp_inst, dppclk_khz, false); + clk_mgr->dccg, dpp_inst, dppclk_khz); } } -static void update_global_dpp_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz) +void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr) { int dpp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz / khz; - - uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider); - - REG_UPDATE(DENTIST_DISPCLK_CNTL, - DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider); - REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100); -} - -static void update_display_clk(struct clk_mgr_internal *clk_mgr, unsigned int khz) -{ + * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dppclk_khz; int disp_divider = DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz / khz; + * clk_mgr->base.dentist_vco_freq_khz / clk_mgr->base.clks.dispclk_khz; + uint32_t dppclk_wdivider = dentist_get_did_from_divider(dpp_divider); uint32_t dispclk_wdivider = dentist_get_did_from_divider(disp_divider); REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_wdivider); +// REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_CHG_DONE, 1, 5, 100); + REG_UPDATE(DENTIST_DISPCLK_CNTL, + DENTIST_DPPCLK_WDIVIDER, dppclk_wdivider); + REG_WAIT(DENTIST_DISPCLK_CNTL, DENTIST_DPPCLK_CHG_DONE, 1, 5, 100); } -static void request_voltage_and_program_disp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - struct pp_smu_funcs_nv *pp_smu = NULL; - bool going_up = clk_mgr->base.clks.dispclk_khz < khz; - - if (dc->res_pool->pp_smu) - pp_smu = &dc->res_pool->pp_smu->nv_funcs; - - clk_mgr->base.clks.dispclk_khz = khz; - - if (going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); - - update_display_clk(clk_mgr, khz); - - if (!going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); -} - -static void request_voltage_and_program_global_dpp_clk(struct clk_mgr *clk_mgr_base, unsigned int khz) -{ - struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); - struct dc *dc = clk_mgr_base->ctx->dc; - struct pp_smu_funcs_nv *pp_smu = NULL; - bool going_up = clk_mgr->base.clks.dppclk_khz < khz; - - if (dc->res_pool->pp_smu) - pp_smu = 
&dc->res_pool->pp_smu->nv_funcs; - - clk_mgr->base.clks.dppclk_khz = khz; - clk_mgr->dccg->ref_dppclk = khz; - - if (going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000); - - update_global_dpp_clk(clk_mgr, khz); - - if (!going_up && pp_smu && pp_smu->set_voltage_by_freq) - pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000); -} void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, @@ -192,11 +147,12 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, struct dc *dc = clk_mgr_base->ctx->dc; struct pp_smu_funcs_nv *pp_smu = NULL; int display_count; + bool update_dppclk = false; bool update_dispclk = false; bool enter_display_off = false; + bool dpp_clock_lowered = false; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; bool force_reset = false; - int i; if (dc->work_arounds.skip_clock_update) return; @@ -251,12 +207,10 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; - clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support; if (pp_smu && pp_smu->set_pstate_handshake_support) pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support); } - clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; if (should_set_clock(safe_to_lower, new_clocks->dramclk_khz, clk_mgr_base->clks.dramclk_khz)) { clk_mgr_base->clks.dramclk_khz = new_clocks->dramclk_khz; @@ -264,50 +218,40 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, pp_smu->set_hard_min_uclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.dramclk_khz / 1000); } - if (dc->config.forced_clocks == false) { - // First update display clock - if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) - request_voltage_and_program_disp_clk(clk_mgr_base, new_clocks->dispclk_khz); - - // Updating DPP clock requires some more logic - if (!safe_to_lower) { - // For pre-programming, we need to make sure any DPP clock that will go up has to go up + if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) { + if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz) + dpp_clock_lowered = true; + clk_mgr->base.clks.dppclk_khz = new_clocks->dppclk_khz; - // First raise the global reference if needed - if (new_clocks->dppclk_khz > clk_mgr_base->clks.dppclk_khz) - request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz); + if (pp_smu && pp_smu->set_voltage_by_freq) + pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_PIXELCLK, clk_mgr_base->clks.dppclk_khz / 1000); - // Then raise any dividers that need raising - for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz; + update_dppclk = true; + } - if (!context->res_ctx.pipe_ctx[i].plane_state) - continue; + if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) { + clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz; + if (pp_smu && pp_smu->set_voltage_by_freq) + pp_smu->set_voltage_by_freq(&pp_smu->pp_smu, PP_SMU_NV_DISPCLK, clk_mgr_base->clks.dispclk_khz / 1000); - dpp_inst = 
context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; - dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; + update_dispclk = true; + } - clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, true); - } + if (dc->config.forced_clocks == false || (force_reset && safe_to_lower)) { + if (dpp_clock_lowered) { + // if clock is being lowered, increase DTO before lowering refclk + dcn20_update_clocks_update_dpp_dto(clk_mgr, context); + dcn20_update_clocks_update_dentist(clk_mgr); } else { - // For post-programming, we can lower ref clk if needed, and unconditionally set all the DTOs - - if (new_clocks->dppclk_khz < clk_mgr_base->clks.dppclk_khz) - request_voltage_and_program_global_dpp_clk(clk_mgr_base, new_clocks->dppclk_khz); - - for (i = 0; i < clk_mgr->base.ctx->dc->res_pool->pipe_count; i++) { - int dpp_inst, dppclk_khz; - - if (!context->res_ctx.pipe_ctx[i].plane_state) - continue; - - dpp_inst = context->res_ctx.pipe_ctx[i].plane_res.dpp->inst; - dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz; - - clk_mgr->dccg->funcs->update_dpp_dto(clk_mgr->dccg, dpp_inst, dppclk_khz, false); - } + // if clock is being raised, increase refclk before lowering DTO + if (update_dppclk || update_dispclk) + dcn20_update_clocks_update_dentist(clk_mgr); + // always update dtos unless clock is lowered and not safe to lower + if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) + dcn20_update_clocks_update_dpp_dto(clk_mgr, context); } } + if (update_dispclk && dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { /*update dmcu for wait_loop count*/ @@ -320,6 +264,8 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, struct dc_state *context, bool safe_to_lower) { + struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr); + struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; /* Min fclk = 1.2GHz since all the extra scemi logic seems to run off of it */ int fclk_adj = new_clocks->fclk_khz > 1200000 ? new_clocks->fclk_khz : 1200000; @@ -357,14 +303,18 @@ void dcn2_update_clocks_fpga(struct clk_mgr *clk_mgr, clk_mgr->clks.dispclk_khz = new_clocks->dispclk_khz; } - /* Both fclk and dppclk ref are run on the same scemi clock so we - * need to keep the same value for both + /* Both fclk and ref_dppclk run on the same scemi clock. + * So take the higher value since the DPP DTO is typically programmed + * such that max dppclk is 1:1 with ref_dppclk. */ if (clk_mgr->clks.fclk_khz > clk_mgr->clks.dppclk_khz) clk_mgr->clks.dppclk_khz = clk_mgr->clks.fclk_khz; if (clk_mgr->clks.dppclk_khz > clk_mgr->clks.fclk_khz) clk_mgr->clks.fclk_khz = clk_mgr->clks.dppclk_khz; + // Both fclk and ref_dppclk run on the same scemi clock. 
+ clk_mgr_int->dccg->ref_dppclk = clk_mgr->clks.fclk_khz; + dm_set_dcn_clocks(clk_mgr->ctx, &clk_mgr->clks); } @@ -409,12 +359,36 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr, } } +static bool dcn2_are_clock_states_equal(struct dc_clocks *a, + struct dc_clocks *b) +{ + if (a->dispclk_khz != b->dispclk_khz) + return false; + else if (a->dppclk_khz != b->dppclk_khz) + return false; + else if (a->dcfclk_khz != b->dcfclk_khz) + return false; + else if (a->socclk_khz != b->socclk_khz) + return false; + else if (a->dcfclk_deep_sleep_khz != b->dcfclk_deep_sleep_khz) + return false; + else if (a->phyclk_khz != b->phyclk_khz) + return false; + else if (a->dramclk_khz != b->dramclk_khz) + return false; + else if (a->p_state_change_support != b->p_state_change_support) + return false; + + return true; +} + static struct clk_mgr_funcs dcn2_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .update_clocks = dcn2_update_clocks, .init_clocks = dcn2_init_clocks, .enable_pme_wa = dcn2_enable_pme_wa, .get_clock = dcn2_get_clock, + .are_clock_states_equal = dcn2_are_clock_states_equal, }; @@ -442,7 +416,7 @@ void dcn20_clk_mgr_construct( if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { dcn2_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->dentist_vco_freq_khz = 3850000; + clk_mgr->base.dentist_vco_freq_khz = 3850000; } else { /* DFS Slice 2 should be used for DPREFCLK */ @@ -466,15 +440,15 @@ void dcn20_clk_mgr_construct( pll_req = dc_fixpt_mul_int(pll_req, 100000); /* integer part is now VCO frequency in kHz */ - clk_mgr->dentist_vco_freq_khz = dc_fixpt_floor(pll_req); + clk_mgr->base.dentist_vco_freq_khz = dc_fixpt_floor(pll_req); /* in case we don't get a value from the register, use default */ - if (clk_mgr->dentist_vco_freq_khz == 0) - clk_mgr->dentist_vco_freq_khz = 3850000; + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3850000; /* Calculate the DPREFCLK in kHz.*/ clk_mgr->base.dprefclk_khz = (DENTIST_DIVIDER_RANGE_SCALE_FACTOR - * clk_mgr->dentist_vco_freq_khz) / target_div; + * clk_mgr->base.dentist_vco_freq_khz) / target_div; } //Integrated_info table does not exist on dGPU projects so should not be referenced //anywhere in code for dGPUs. 
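Illustrative sketch (editorial, not part of the patch): the dcn20_clk_mgr.c hunks above replace the per-pipe request_voltage_and_program_*() helpers with a single ordering rule — when DPPCLK is being lowered, the DPP DTOs are raised before the DENTIST refclk divider is dropped, and when clocks are being raised, DENTIST is programmed first and the DTOs follow. The stand-alone C below mirrors that rule with simplified stand-in types and function names (sketch_*); these are assumptions for illustration only, not the driver's real API.

#include <stdbool.h>
#include <stdio.h>

struct sketch_clks {
	int dispclk_khz;
	int dppclk_khz;
};

/* stand-in for dcn20_update_clocks_update_dpp_dto() */
static void sketch_update_dpp_dto(const struct sketch_clks *clks)
{
	printf("program DPP DTOs for dppclk %d kHz\n", clks->dppclk_khz);
}

/* stand-in for dcn20_update_clocks_update_dentist() */
static void sketch_update_dentist(const struct sketch_clks *clks)
{
	printf("program DENTIST dividers: disp %d kHz, dpp %d kHz\n",
	       clks->dispclk_khz, clks->dppclk_khz);
}

static void sketch_update_clocks(struct sketch_clks *cur,
				 const struct sketch_clks *req)
{
	bool dpp_clock_lowered = req->dppclk_khz < cur->dppclk_khz;

	cur->dispclk_khz = req->dispclk_khz;
	cur->dppclk_khz = req->dppclk_khz;

	if (dpp_clock_lowered) {
		/* clock is being lowered: increase DTO before lowering refclk */
		sketch_update_dpp_dto(cur);
		sketch_update_dentist(cur);
	} else {
		/* clock is being raised: increase refclk before updating DTOs */
		sketch_update_dentist(cur);
		sketch_update_dpp_dto(cur);
	}
}

int main(void)
{
	struct sketch_clks cur = { .dispclk_khz = 600000, .dppclk_khz = 600000 };
	struct sketch_clks lower = { .dispclk_khz = 540000, .dppclk_khz = 540000 };

	sketch_update_clocks(&cur, &lower);
	return 0;
}

The same lower-versus-raise ordering reappears in the rn_update_clocks() hunks further down, where the refclk side is programmed through rn_vbios_smu_set_dppclk() rather than the DENTIST registers.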
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h index ac31a9787305..c9fd824f3c23 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.h @@ -50,4 +50,5 @@ void dcn2_get_clock(struct clk_mgr *clk_mgr, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); +void dcn20_update_clocks_update_dentist(struct clk_mgr_internal *clk_mgr); #endif //__DCN20_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 787f94d815f4..790a2d211bd6 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -52,6 +52,45 @@ #define REG(reg_name) \ (CLK_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + +/* TODO: evaluate how to lower or disable all dcn clocks in screen off case */ +int rn_get_active_display_cnt_wa( + struct dc *dc, + struct dc_state *context) +{ + int i, display_count; + bool hdmi_present = false; + + display_count = 0; + for (i = 0; i < context->stream_count; i++) { + const struct dc_stream_state *stream = context->streams[i]; + + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) + hdmi_present = true; + } + + for (i = 0; i < dc->link_count; i++) { + const struct dc_link *link = dc->links[i]; + + /* + * Only notify active stream or virtual stream. + * Need to notify virtual stream to work around + * headless case. HPD does not fire when system is in + * S0i2. + */ + /* abusing the fact that the dig and phy are coupled to see if the phy is enabled */ + if (link->connector_signal == SIGNAL_TYPE_VIRTUAL || + link->link_enc->funcs->is_dig_enabled(link->link_enc)) + display_count++; + } + + /* WA for hang on HDMI after display off back back on*/ + if (display_count == 0 && hdmi_present) + display_count = 1; + + return display_count; +} + void rn_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool safe_to_lower) @@ -62,17 +101,36 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, int display_count; bool update_dppclk = false; bool update_dispclk = false; - bool enter_display_off = false; bool dpp_clock_lowered = false; - struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; - display_count = clk_mgr_helper_get_active_display_cnt(dc, context); + struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; - if (display_count == 0) - enter_display_off = true; + if (dc->work_arounds.skip_clock_update) + return; - if (enter_display_off == safe_to_lower) { - rn_vbios_smu_set_display_count(clk_mgr, display_count); + /* + * if it is safe to lower, but we are already in the lower state, we don't have to do anything + * also if safe to lower is false, we just go in the higher state + */ + if (safe_to_lower) { + /* check that we're not already in lower */ + if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) { + + display_count = rn_get_active_display_cnt_wa(dc, context); + /* if we can go lower, go lower */ + if (display_count == 0) { + rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER); + /* update power state */ + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; + } + } + } else { + /* check that we're not already in D0 */ + if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_MISSION_MODE) { + rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_MISSION_MODE); + /* 
update power state */ + clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_MISSION_MODE; + } } if (should_set_clock(safe_to_lower, new_clocks->phyclk_khz, clk_mgr_base->clks.phyclk_khz)) { @@ -113,7 +171,8 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base, // if clock is being raised, increase refclk before lowering DTO if (update_dppclk || update_dispclk) rn_vbios_smu_set_dppclk(clk_mgr, clk_mgr_base->clks.dppclk_khz); - if (update_dppclk) + // always update dtos unless clock is lowered and not safe to lower + if (new_clocks->dppclk_khz >= dc->current_state->bw_ctx.bw.dcn.clk.dppclk_khz) dcn20_update_clocks_update_dpp_dto(clk_mgr, context); } @@ -319,7 +378,7 @@ void rn_get_clk_states(struct clk_mgr *clk_mgr_base, struct clk_states *s) rn_dump_clk_registers(&sb, clk_mgr_base, &log_info); - s->dprefclk_khz = sb.dprefclk; + s->dprefclk_khz = sb.dprefclk * 1000; } void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base) @@ -329,12 +388,96 @@ void rn_enable_pme_wa(struct clk_mgr *clk_mgr_base) rn_vbios_smu_enable_pme_wa(clk_mgr); } +void rn_init_clocks(struct clk_mgr *clk_mgr) +{ + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); + // Assumption is that boot state always supports pstate + clk_mgr->clks.p_state_change_support = true; + clk_mgr->clks.prev_p_state_change_support = true; + clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; +} + +void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges) +{ + int i, num_valid_sets; + + num_valid_sets = 0; + + for (i = 0; i < WM_SET_COUNT; i++) { + /* skip empty entries, the smu array has no holes*/ + if (!bw_params->wm_table.entries[i].valid) + continue; + + ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst; + ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;; + /* We will not select WM based on dcfclk, so leave it as unconstrained */ + ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + /* fclk wil be used to select WM*/ + + if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) { + if (i == 0) + ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0; + else { + /* add 1 to make it non-overlapping with next lvl */ + ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1; + } + ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz; + + } else { + /* unconstrained for memory retraining */ + ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + + /* Modify previous watermark range to cover up to max */ + ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + } + num_valid_sets++; + } + + ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */ + ranges->num_reader_wm_sets = num_valid_sets; + + /* modify the min and max to make sure we cover the whole range*/ + ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + + /* This is for writeback only, does not matter currently as no writeback support*/ + ranges->num_writer_wm_sets = 1; + ranges->writer_wm_sets[0].wm_inst = WM_A; + ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; + ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + +} + +static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base) +{ + struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug; + struct pp_smu_wm_range_sets ranges = {0}; + struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); + struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu; + + if (!debug->disable_pplib_wm_range) { + build_watermark_ranges(clk_mgr_base->bw_params, &ranges); + + /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ + if (pp_smu && pp_smu->rn_funcs.set_wm_ranges) + pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges); + } + +} + static struct clk_mgr_funcs dcn21_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, .update_clocks = rn_update_clocks, - .init_clocks = dcn2_init_clocks, + .init_clocks = rn_init_clocks, .enable_pme_wa = rn_enable_pme_wa, - /* .dump_clk_registers = rn_dump_clk_registers */ + /* .dump_clk_registers = rn_dump_clk_registers, */ + .notify_wm_ranges = rn_notify_wm_ranges }; struct clk_bw_params rn_bw_params = { @@ -405,80 +548,50 @@ struct clk_bw_params rn_bw_params = { } }; -void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges) +static unsigned int find_dcfclk_for_voltage(struct dpm_clocks *clock_table, unsigned int voltage) { - int i, num_valid_sets; - - num_valid_sets = 0; - - for (i = 0; i < WM_SET_COUNT; i++) { - /* skip empty entries, the smu array has no holes*/ - if (!bw_params->wm_table.entries[i].valid) - continue; - - ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst; - ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;; - /* We will not select WM based on dcfclk, so leave it as unconstrained */ - ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - /* fclk wil be used to select WM*/ - - if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) { - if (i == 0) - ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0; - else { - /* add 1 to make it non-overlapping with next lvl */ - ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1; - } - ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz; - - } else { - /* unconstrained for memory retraining */ - ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; + int i; - /* Modify previous watermark range to cover up to max */ - ranges->reader_wm_sets[num_valid_sets - 1].max_fill_clk_mhz = 
PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - } - num_valid_sets++; + for (i = 0; i < PP_SMU_NUM_DCFCLK_DPM_LEVELS; i++) { + if (clock_table->DcfClocks[i].Vol == voltage) + return clock_table->DcfClocks[i].Freq; } - ASSERT(num_valid_sets != 0); /* Must have at least one set of valid watermarks */ - ranges->num_reader_wm_sets = num_valid_sets; - - /* modify the min and max to make sure we cover the whole range*/ - ranges->reader_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->reader_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges->reader_wm_sets[ranges->num_reader_wm_sets - 1].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - - /* This is for writeback only, does not matter currently as no writeback support*/ - ranges->num_writer_wm_sets = 1; - ranges->writer_wm_sets[0].wm_inst = WM_A; - ranges->writer_wm_sets[0].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->writer_wm_sets[0].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - ranges->writer_wm_sets[0].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN; - ranges->writer_wm_sets[0].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX; - + ASSERT(0); + return 0; } -void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id) +static void rn_clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct dpm_clocks *clock_table, struct hw_asic_id *asic_id) { - int i; + int i, j = 0; + + j = -1; ASSERT(PP_SMU_NUM_FCLK_DPM_LEVELS <= MAX_NUM_DPM_LVL); - for (i = 0; i < PP_SMU_NUM_FCLK_DPM_LEVELS; i++) { - if (clock_table->FClocks[i].Freq == 0) + /* Find lowest DPM, FCLK is filled in reverse order*/ + + for (i = PP_SMU_NUM_FCLK_DPM_LEVELS - 1; i >= 0; i--) { + if (clock_table->FClocks[i].Freq != 0) { + j = i; break; + } + } + + if (j == -1) { + /* clock table is all 0s, just use our own hardcode */ + ASSERT(0); + return; + } + + bw_params->clk_table.num_entries = j + 1; - bw_params->clk_table.entries[i].dcfclk_mhz = clock_table->DcfClocks[i].Freq; - bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[i].Freq; - bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[i].Freq; - bw_params->clk_table.entries[i].socclk_mhz = clock_table->SocClocks[i].Freq; - bw_params->clk_table.entries[i].voltage = clock_table->FClocks[i].Vol; + for (i = 0; i < bw_params->clk_table.num_entries; i++, j--) { + bw_params->clk_table.entries[i].fclk_mhz = clock_table->FClocks[j].Freq; + bw_params->clk_table.entries[i].memclk_mhz = clock_table->MemClocks[j].Freq; + bw_params->clk_table.entries[i].voltage = clock_table->FClocks[j].Vol; + bw_params->clk_table.entries[i].dcfclk_mhz = find_dcfclk_for_voltage(clock_table, clock_table->FClocks[j].Vol); } - bw_params->clk_table.num_entries = i; bw_params->vram_type = asic_id->vram_type; bw_params->num_channels = asic_id->vram_width / DDR4_DRAM_WIDTH; @@ -486,7 +599,7 @@ void clk_mgr_helper_populate_bw_params(struct clk_bw_params *bw_params, struct d for (i = 0; i < WM_SET_COUNT; i++) { bw_params->wm_table.entries[i].wm_inst = i; - if (clock_table->FClocks[i].Freq == 0) { + if (i >= bw_params->clk_table.num_entries) { bw_params->wm_table.entries[i].valid = false; continue; } @@ -534,57 +647,42 @@ void rn_clk_mgr_construct( if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment)) { 
dcn21_funcs.update_clocks = dcn2_update_clocks_fpga; - clk_mgr->dentist_vco_freq_khz = 3600000; + clk_mgr->base.dentist_vco_freq_khz = 3600000; clk_mgr->base.dprefclk_khz = 600000; } else { struct clk_log_info log_info = {0}; /* TODO: Check we get what we expect during bringup */ - clk_mgr->dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); + clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr); /* in case we don't get a value from the register, use default */ - if (clk_mgr->dentist_vco_freq_khz == 0) - clk_mgr->dentist_vco_freq_khz = 3600000; + if (clk_mgr->base.dentist_vco_freq_khz == 0) + clk_mgr->base.dentist_vco_freq_khz = 3600000; rn_dump_clk_registers(&s, &clk_mgr->base, &log_info); - clk_mgr->base.dprefclk_khz = s.dprefclk; - - if (clk_mgr->base.dprefclk_khz != 600000) { - clk_mgr->base.dprefclk_khz = 600000; - ASSERT(1); //TODO: Renoir follow up. - } + /* Convert dprefclk units from MHz to KHz */ + /* Value already divided by 10, some resolution lost */ + clk_mgr->base.dprefclk_khz = s.dprefclk * 1000; /* in case we don't get a value from the register, use default */ - if (clk_mgr->base.dprefclk_khz == 0) + if (clk_mgr->base.dprefclk_khz == 0) { + ASSERT(clk_mgr->base.dprefclk_khz == 600000); clk_mgr->base.dprefclk_khz = 600000; + } } dce_clock_read_ss_info(clk_mgr); clk_mgr->base.bw_params = &rn_bw_params; - if (pp_smu) { + if (pp_smu && pp_smu->rn_funcs.get_dpm_clock_table) { pp_smu->rn_funcs.get_dpm_clock_table(&pp_smu->rn_funcs.pp_smu, &clock_table); - clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id); + rn_clk_mgr_helper_populate_bw_params(clk_mgr->base.bw_params, &clock_table, &ctx->asic_id); } - /* - * Notify SMU which set of WM should be selected for different ranges of fclk - * On Renoir there is a maximumum of 4 DF pstates supported, could be less - * depending on DDR speed and fused maximum fclk. 
- */ - if (!debug->disable_pplib_wm_range) { - struct pp_smu_wm_range_sets ranges = {0}; - - build_watermark_ranges(clk_mgr->base.bw_params, &ranges); - - /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ - if (pp_smu && pp_smu->rn_funcs.set_wm_ranges) - pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges); + if (!IS_FPGA_MAXIMUS_DC(ctx->dce_environment) && clk_mgr->smu_ver >= 0x00371500) { + /* enable powerfeatures when displaycount goes to 0 */ + rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr, !debug->disable_48mhz_pwrdwn); } - - /* enable powerfeatures when displaycount goes to 0 */ - if (!debug->disable_48mhz_pwrdwn) - rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(clk_mgr); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h index aadec06fde10..e4322fa5475b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.h @@ -26,11 +26,13 @@ #ifndef __RN_CLK_MGR_H__ #define __RN_CLK_MGR_H__ +#include "clk_mgr.h" +#include "dm_pp_smu.h" + struct rn_clk_registers { uint32_t CLK1_CLK0_CURRENT_CNT; /* DPREFCLK */ }; - void rn_clk_mgr_construct(struct dc_context *ctx, struct clk_mgr_internal *clk_mgr, struct pp_smu_funcs *pp_smu, diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c index 50984c1811bb..cb7c0e8b7e1b 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.c @@ -33,7 +33,7 @@ #include "mp/mp_12_0_0_sh_mask.h" #define REG(reg_name) \ - (MP1_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + (MP0_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) #define FN(reg_name, field) \ FD(reg_name##__##field) @@ -84,16 +84,12 @@ int rn_vbios_smu_set_dispclk(struct clk_mgr_internal *clk_mgr, int requested_dis int actual_dispclk_set_mhz = -1; struct dc *core_dc = clk_mgr->base.ctx->dc; struct dmcu *dmcu = core_dc->res_pool->dmcu; - uint32_t clk = requested_dispclk_khz / 1000; - - if (clk <= 100) - clk = 101; /* Unit of SMU msg parameter is Mhz */ actual_dispclk_set_mhz = rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDispclkFreq, - clk); + requested_dispclk_khz / 1000); if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) { if (dmcu && dmcu->funcs->is_dmcu_initialized(dmcu)) { @@ -124,7 +120,7 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque { int actual_dcfclk_set_mhz = -1; - if (clk_mgr->smu_ver < 0xFFFFFFFF) + if (clk_mgr->smu_ver < 0x370c00) return actual_dcfclk_set_mhz; actual_dcfclk_set_mhz = rn_vbios_smu_send_msg_with_param( @@ -139,7 +135,7 @@ int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int { int actual_min_ds_dcfclk_mhz = -1; - if (clk_mgr->smu_ver < 0xFFFFFFFF) + if (clk_mgr->smu_ver < 0x370c00) return actual_min_ds_dcfclk_mhz; actual_min_ds_dcfclk_mhz = rn_vbios_smu_send_msg_with_param( @@ -162,33 +158,35 @@ int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_ { int actual_dppclk_set_mhz = -1; - uint32_t clk = requested_dpp_khz / 1000; - - if (clk <= 100) - clk = 101; - actual_dppclk_set_mhz = rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_SetDppclkFreq, - clk); + requested_dpp_khz / 1000); return actual_dppclk_set_mhz * 1000; } -void 
rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count) +void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, enum dcn_pwr_state state) { + int disp_count; + + if (state == DCN_PWR_STATE_LOW_POWER) + disp_count = 0; + else + disp_count = 1; + rn_vbios_smu_send_msg_with_param( - clk_mgr, - VBIOSSMC_MSG_SetDisplayCount, - display_count); + clk_mgr, + VBIOSSMC_MSG_SetDisplayCount, + disp_count); } -void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr) +void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable) { rn_vbios_smu_send_msg_with_param( clk_mgr, VBIOSSMC_MSG_EnableTmdp48MHzRefclkPwrDown, - 0); + enable); } void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h index da3a49487c6d..ccc01879c9d4 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr_vbios_smu.h @@ -33,8 +33,8 @@ int rn_vbios_smu_set_hard_min_dcfclk(struct clk_mgr_internal *clk_mgr, int reque int rn_vbios_smu_set_min_deep_sleep_dcfclk(struct clk_mgr_internal *clk_mgr, int requested_min_ds_dcfclk_khz); void rn_vbios_smu_set_phyclk(struct clk_mgr_internal *clk_mgr, int requested_phyclk_khz); int rn_vbios_smu_set_dppclk(struct clk_mgr_internal *clk_mgr, int requested_dpp_khz); -void rn_vbios_smu_set_display_count(struct clk_mgr_internal *clk_mgr, int display_count); -void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr); +void rn_vbios_smu_set_dcn_low_power_state(struct clk_mgr_internal *clk_mgr, int display_count); +void rn_vbios_smu_enable_48mhz_tmdp_refclk_pwrdwn(struct clk_mgr_internal *clk_mgr, bool enable); void rn_vbios_smu_enable_pme_wa(struct clk_mgr_internal *clk_mgr); #endif /* DAL_DC_DCN10_RV1_CLK_MGR_VBIOS_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 4b8819c27fcd..32f31bf91915 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -194,7 +194,7 @@ static bool create_links( } } - if (!should_destory_link) { + if (dc->config.force_enum_edp || !should_destory_link) { dc->links[dc->link_count] = link; link->dc = dc; ++dc->link_count; @@ -411,6 +411,27 @@ bool dc_stream_get_crc(struct dc *dc, struct dc_stream_state *stream, return false; } +void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, + enum dc_dynamic_expansion option) +{ + /* OPP FMT dyn expansion updates*/ + int i = 0; + struct pipe_ctx *pipe_ctx; + + for (i = 0; i < MAX_PIPES; i++) { + if (dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i]; + pipe_ctx->stream_res.opp->dyn_expansion = option; + pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( + pipe_ctx->stream_res.opp, + COLOR_SPACE_YCBCR601, + stream->timing.display_color_depth, + stream->signal); + } + } +} + void dc_stream_set_dither_option(struct dc_stream_state *stream, enum dc_dither_option option) { @@ -765,8 +786,13 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context) #if defined(CONFIG_DRM_AMD_DC_DCN2_0) disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context); #endif - dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); + if 
(dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context); } +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, dangling_context); +#endif } current_ctx = dc->current_state; @@ -789,9 +815,6 @@ struct dc *dc_create(const struct dc_init_data *init_params) if (false == construct(dc, init_params)) goto construct_fail; - /*TODO: separate HW and SW initialization*/ - dc->hwss.init_hw(dc); - full_pipe_count = dc->res_pool->pipe_count; if (dc->res_pool->underlay_pipe_index != NO_UNDERLAY_PIPE) full_pipe_count--; @@ -824,9 +847,24 @@ alloc_fail: return NULL; } +void dc_hardware_init(struct dc *dc) +{ + dc->hwss.init_hw(dc); +} + void dc_init_callbacks(struct dc *dc, const struct dc_callback_init *init_params) { +#ifdef CONFIG_DRM_AMD_DC_HDCP + dc->ctx->cp_psp = init_params->cp_psp; +#endif +} + +void dc_deinit_callbacks(struct dc *dc) +{ +#ifdef CONFIG_DRM_AMD_DC_HDCP + memset(&dc->ctx->cp_psp, 0, sizeof(dc->ctx->cp_psp)); +#endif } void dc_destroy(struct dc **dc) @@ -905,15 +943,11 @@ static void program_timing_sync( /* set first pipe with plane as master */ for (j = 0; j < group_size; j++) { - struct pipe_ctx *temp; - if (pipe_set[j]->plane_state) { if (j == 0) break; - temp = pipe_set[0]; - pipe_set[0] = pipe_set[j]; - pipe_set[j] = temp; + swap(pipe_set[0], pipe_set[j]); break; } } @@ -970,40 +1004,87 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc, struct dc_crtc_timing *crtc_timing) { struct timing_generator *tg; + struct stream_encoder *se = NULL; + + struct dc_crtc_timing hw_crtc_timing = {0}; + struct dc_link *link = sink->link; - unsigned int enc_inst, tg_inst; + unsigned int i, enc_inst, tg_inst = 0; + + // Seamless port only support single DP and EDP so far + if (sink->sink_signal != SIGNAL_TYPE_DISPLAY_PORT && + sink->sink_signal != SIGNAL_TYPE_EDP) + return false; /* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) return false; - /* Check for which front end is used by this encoder. - * Note the inst is 1 indexed, where 0 is undefined. - * Note that DIG_FE can source from different OTG but our - * current implementation always map 1-to-1, so this code makes - * the same assumption and doesn't check OTG source. 
- */ enc_inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); - /* Instance should be within the range of the pool */ - if (enc_inst >= dc->res_pool->pipe_count) + if (enc_inst == ENGINE_ID_UNKNOWN) return false; - if (enc_inst >= dc->res_pool->stream_enc_count) - return false; + for (i = 0; i < dc->res_pool->stream_enc_count; i++) { + if (dc->res_pool->stream_enc[i]->id == enc_inst) { + + se = dc->res_pool->stream_enc[i]; + + tg_inst = dc->res_pool->stream_enc[i]->funcs->dig_source_otg( + dc->res_pool->stream_enc[i]); + break; + } + } - tg_inst = dc->res_pool->stream_enc[enc_inst]->funcs->dig_source_otg( - dc->res_pool->stream_enc[enc_inst]); + // tg_inst not found + if (i == dc->res_pool->stream_enc_count) + return false; if (tg_inst >= dc->res_pool->timing_generator_count) return false; tg = dc->res_pool->timing_generators[tg_inst]; - if (!tg->funcs->is_matching_timing) + if (!tg->funcs->get_hw_timing) + return false; + + if (!tg->funcs->get_hw_timing(tg, &hw_crtc_timing)) + return false; + + if (crtc_timing->h_total != hw_crtc_timing.h_total) + return false; + + if (crtc_timing->h_border_left != hw_crtc_timing.h_border_left) return false; - if (!tg->funcs->is_matching_timing(tg, crtc_timing)) + if (crtc_timing->h_addressable != hw_crtc_timing.h_addressable) + return false; + + if (crtc_timing->h_border_right != hw_crtc_timing.h_border_right) + return false; + + if (crtc_timing->h_front_porch != hw_crtc_timing.h_front_porch) + return false; + + if (crtc_timing->h_sync_width != hw_crtc_timing.h_sync_width) + return false; + + if (crtc_timing->v_total != hw_crtc_timing.v_total) + return false; + + if (crtc_timing->v_border_top != hw_crtc_timing.v_border_top) + return false; + + if (crtc_timing->v_addressable != hw_crtc_timing.v_addressable) + return false; + + if (crtc_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) + return false; + + if (crtc_timing->v_front_porch != hw_crtc_timing.v_front_porch) + return false; + + if (crtc_timing->v_sync_width != hw_crtc_timing.v_sync_width) return false; if (dc_is_dp_signal(link->connector_signal)) { @@ -1016,6 +1097,20 @@ bool dc_validate_seamless_boot_timing(const struct dc *dc, if (crtc_timing->pix_clk_100hz != pix_clk_100hz) return false; + if (!se->funcs->dp_get_pixel_format) + return false; + + if (!se->funcs->dp_get_pixel_format( + se, + &hw_crtc_timing.pixel_encoding, + &hw_crtc_timing.display_color_depth)) + return false; + + if (hw_crtc_timing.display_color_depth != crtc_timing->display_color_depth) + return false; + + if (hw_crtc_timing.pixel_encoding != crtc_timing->pixel_encoding) + return false; } return true; @@ -1077,15 +1172,20 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c /* re-program planes for existing stream, in case we need to * free up plane resource for later use */ - for (i = 0; i < context->stream_count; i++) { - if (context->streams[i]->mode_changed) - continue; + if (dc->hwss.apply_ctx_for_surface) + for (i = 0; i < context->stream_count; i++) { + if (context->streams[i]->mode_changed) + continue; - dc->hwss.apply_ctx_for_surface( - dc, context->streams[i], - context->stream_status[i].plane_count, - context); /* use new pipe config in new context */ - } + dc->hwss.apply_ctx_for_surface( + dc, context->streams[i], + context->stream_status[i].plane_count, + context); /* use new pipe config in new context */ + } +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, context); +#endif /* Program 
hardware */ for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -1104,16 +1204,21 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c } /* Program all planes within new context*/ +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, context); +#endif for (i = 0; i < context->stream_count; i++) { const struct dc_link *link = context->streams[i]->link; if (!context->streams[i]->mode_changed) continue; - dc->hwss.apply_ctx_for_surface( - dc, context->streams[i], - context->stream_status[i].plane_count, - context); + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface( + dc, context->streams[i], + context->stream_status[i].plane_count, + context); /* * enable stereo @@ -1140,15 +1245,9 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c dc_enable_stereo(dc, context, dc_streams, context->stream_count); - if (!dc->optimize_seamless_boot) - /* pplib is notified if disp_num changed */ - dc->hwss.optimize_bandwidth(dc, context); - for (i = 0; i < context->stream_count; i++) context->streams[i]->mode_changed = false; - memset(&context->commit_hints, 0, sizeof(context->commit_hints)); - dc_release_state(dc->current_state); dc->current_state = context; @@ -1496,20 +1595,15 @@ static enum surface_update_type det_surface_update(const struct dc *dc, enum surface_update_type overall_type = UPDATE_TYPE_FAST; union surface_update_flags *update_flags = &u->surface->update_flags; - update_flags->raw = 0; // Reset all flags - if (u->flip_addr) update_flags->bits.addr_update = 1; - if (!is_surface_in_context(context, u->surface)) { - update_flags->bits.new_plane = 1; + if (!is_surface_in_context(context, u->surface) || u->surface->force_full_update) { + update_flags->raw = 0xFFFFFFFF; return UPDATE_TYPE_FULL; } - if (u->surface->force_full_update) { - update_flags->bits.full_update = 1; - return UPDATE_TYPE_FULL; - } + update_flags->raw = 0; // Reset all flags type = get_plane_info_update_type(u); elevate_update_type(&overall_type, type); @@ -1567,40 +1661,43 @@ static enum surface_update_type check_update_surfaces_for_stream( enum surface_update_type overall_type = UPDATE_TYPE_FAST; if (stream_status == NULL || stream_status->plane_count != surface_count) - return UPDATE_TYPE_FULL; + overall_type = UPDATE_TYPE_FULL; /* some stream updates require passive update */ if (stream_update) { - if ((stream_update->src.height != 0) && - (stream_update->src.width != 0)) - return UPDATE_TYPE_FULL; + union stream_update_flags *su_flags = &stream_update->stream->update_flags; - if ((stream_update->dst.height != 0) && - (stream_update->dst.width != 0)) - return UPDATE_TYPE_FULL; + if ((stream_update->src.height != 0 && stream_update->src.width != 0) || + (stream_update->dst.height != 0 && stream_update->dst.width != 0)) + su_flags->bits.scaling = 1; if (stream_update->out_transfer_func) - return UPDATE_TYPE_FULL; + su_flags->bits.out_tf = 1; if (stream_update->abm_level) - return UPDATE_TYPE_FULL; + su_flags->bits.abm_level = 1; if (stream_update->dpms_off) - return UPDATE_TYPE_FULL; + su_flags->bits.dpms_off = 1; + + if (stream_update->gamut_remap) + su_flags->bits.gamut_remap = 1; #if defined(CONFIG_DRM_AMD_DC_DCN2_0) if (stream_update->wb_update) - return UPDATE_TYPE_FULL; + su_flags->bits.wb_update = 1; #endif + if (su_flags->raw != 0) + overall_type = UPDATE_TYPE_FULL; + + if (stream_update->output_csc_transform || stream_update->output_color_space) + 
su_flags->bits.out_csc = 1; } for (i = 0 ; i < surface_count; i++) { enum surface_update_type type = det_surface_update(dc, &updates[i]); - if (type == UPDATE_TYPE_FULL) - return type; - elevate_update_type(&overall_type, type); } @@ -1622,16 +1719,29 @@ enum surface_update_type dc_check_update_surfaces_for_stream( int i; enum surface_update_type type; + if (stream_update) + stream_update->stream->update_flags.raw = 0; for (i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0; type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status); - if (type == UPDATE_TYPE_FULL) + if (type == UPDATE_TYPE_FULL) { + if (stream_update) + stream_update->stream->update_flags.raw = 0xFFFFFFFF; for (i = 0; i < surface_count; i++) updates[i].surface->update_flags.raw = 0xFFFFFFFF; + } - if (type == UPDATE_TYPE_FAST && memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) - dc->optimized_required = true; + if (type == UPDATE_TYPE_FAST) { + // If there's an available clock comparator, we use that. + if (dc->clk_mgr->funcs->are_clock_states_equal) { + if (!dc->clk_mgr->funcs->are_clock_states_equal(&dc->clk_mgr->clks, &dc->current_state->bw_ctx.bw.dcn.clk)) + dc->optimized_required = true; + // Else we fallback to mem compare. + } else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) { + dc->optimized_required = true; + } + } return type; } @@ -1872,6 +1982,7 @@ static void commit_planes_do_stream_update(struct dc *dc, struct dc_state *context) { int j; + bool should_program_abm; // Stream updates for (j = 0; j < dc->res_pool->pipe_count; j++) { @@ -1952,14 +2063,21 @@ static void commit_planes_do_stream_update(struct dc *dc, } if (stream_update->abm_level && pipe_ctx->stream_res.abm) { - if (pipe_ctx->stream_res.tg->funcs->is_blanked) { - // if otg funcs defined check if blanked before programming - if (!pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) + should_program_abm = true; + + // if otg funcs defined check if blanked before programming + if (pipe_ctx->stream_res.tg->funcs->is_blanked) + if (pipe_ctx->stream_res.tg->funcs->is_blanked(pipe_ctx->stream_res.tg)) + should_program_abm = false; + + if (should_program_abm) { + if (*stream_update->abm_level == ABM_LEVEL_IMMEDIATE_DISABLE) { + pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm); + } else { pipe_ctx->stream_res.abm->funcs->set_abm_level( pipe_ctx->stream_res.abm, stream->abm_level); - } else - pipe_ctx->stream_res.abm->funcs->set_abm_level( - pipe_ctx->stream_res.abm, stream->abm_level); + } + } } } } @@ -2004,7 +2122,13 @@ static void commit_planes_for_stream(struct dc *dc, * In case of turning off screen, no need to program front end a second time. * just return after program blank. 
*/ - dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface(dc, stream, 0, context); +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx) + dc->hwss.program_front_end_for_ctx(dc, context); +#endif + return; } @@ -2064,10 +2188,15 @@ static void commit_planes_for_stream(struct dc *dc, stream_status = stream_get_status(context, pipe_ctx->stream); - dc->hwss.apply_ctx_for_surface( + if (dc->hwss.apply_ctx_for_surface) + dc->hwss.apply_ctx_for_surface( dc, pipe_ctx->stream, stream_status->plane_count, context); } } +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + if (dc->hwss.program_front_end_for_ctx && update_type != UPDATE_TYPE_FAST) + dc->hwss.program_front_end_for_ctx(dc, context); +#endif // Update Type FAST, Surface updates if (update_type == UPDATE_TYPE_FAST) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index ca20b150afcc..12ba6fdf89b7 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -79,7 +79,6 @@ static void destruct(struct dc_link *link) int i; if (link->hpd_gpio != NULL) { - dal_gpio_close(link->hpd_gpio); dal_gpio_destroy_irq(&link->hpd_gpio); link->hpd_gpio = NULL; } @@ -520,7 +519,7 @@ static void link_disconnect_remap(struct dc_sink *prev_sink, struct dc_link *lin } -static void read_edp_current_link_settings_on_detect(struct dc_link *link) +static void read_current_link_settings_on_detect(struct dc_link *link) { union lane_count_set lane_count_set = { {0} }; uint8_t link_bw_set; @@ -555,17 +554,23 @@ static void read_edp_current_link_settings_on_detect(struct dc_link *link) &link_bw_set, sizeof(link_bw_set)); if (link_bw_set == 0) { - /* If standard link rates are not being used, - * Read DPCD 00115h to find the link rate set used - */ - core_link_read_dpcd(link, DP_LINK_RATE_SET, - &link_rate_set, sizeof(link_rate_set)); - - if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { - link->cur_link_settings.link_rate = - link->dpcd_caps.edp_supported_link_rates[link_rate_set]; - link->cur_link_settings.link_rate_set = link_rate_set; - link->cur_link_settings.use_link_rate_set = true; + if (link->connector_signal == SIGNAL_TYPE_EDP) { + /* If standard link rates are not being used, + * Read DPCD 00115h to find the edp link rate set used + */ + core_link_read_dpcd(link, DP_LINK_RATE_SET, + &link_rate_set, sizeof(link_rate_set)); + + // edp_supported_link_rates_count = 0 for DP + if (link_rate_set < link->dpcd_caps.edp_supported_link_rates_count) { + link->cur_link_settings.link_rate = + link->dpcd_caps.edp_supported_link_rates[link_rate_set]; + link->cur_link_settings.link_rate_set = link_rate_set; + link->cur_link_settings.use_link_rate_set = true; + } + } else { + // Link Rate not found. Seamless boot may not work. + ASSERT(false); } } else { link->cur_link_settings.link_rate = link_bw_set; @@ -680,7 +685,7 @@ static bool is_same_edid(struct dc_edid *old_edid, struct dc_edid *new_edid) return (memcmp(old_edid->raw_edid, new_edid->raw_edid, new_edid->length) == 0); } -bool wait_for_alt_mode(struct dc_link *link) +static bool wait_for_alt_mode(struct dc_link *link) { /** @@ -738,7 +743,8 @@ bool wait_for_alt_mode(struct dc_link *link) * This does not create remote sinks but will trigger DM * to start MST detection if a branch is detected. 
*/ -bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) +static bool dc_link_detect_helper(struct dc_link *link, + enum dc_detect_reason reason) { struct dc_sink_init_data sink_init_data = { 0 }; struct display_sink_capability sink_caps = { 0 }; @@ -753,6 +759,8 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) struct dpcd_caps prev_dpcd_caps; bool same_dpcd = true; enum dc_connection_type new_connection_type = dc_connection_none; + bool perform_dp_seamless_boot = false; + DC_LOGGER_INIT(link->ctx->logger); if (dc_is_virtual_signal(link->connector_signal)) @@ -809,15 +817,15 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) } case SIGNAL_TYPE_EDP: { - read_edp_current_link_settings_on_detect(link); + read_current_link_settings_on_detect(link); detect_edp_sink_caps(link); - sink_caps.transaction_type = - DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX; sink_caps.signal = SIGNAL_TYPE_EDP; break; } case SIGNAL_TYPE_DISPLAY_PORT: { + /* wa HPD high coming too early*/ if (link->link_enc->features.flags.bits.DP_IS_USB_C == 1) { @@ -865,12 +873,24 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * empty which leads to allocate_mst_payload() has "0" * pbn_per_slot value leading to exception on dc_fixpt_div() */ - link->verified_link_cap = link->reported_link_cap; + dp_verify_mst_link_cap(link); + if (prev_sink != NULL) dc_sink_release(prev_sink); return false; } + // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified. + if (reason == DETECT_REASON_BOOT && + dc_ctx->dc->config.power_down_display_on_boot == false && + link->link_status.link_active == true) + perform_dp_seamless_boot = true; + + if (perform_dp_seamless_boot) { + read_current_link_settings_on_detect(link); + link->verified_link_cap = link->reported_link_cap; + } + break; } @@ -955,10 +975,11 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) * two link trainings */ - /* deal with non-mst cases */ - dp_verify_link_cap_with_retries(link, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); + // verify link cap for SST non-seamless boot + if (!perform_dp_seamless_boot) + dp_verify_link_cap_with_retries(link, + &link->reported_link_cap, + LINK_TRAINING_MAX_VERIFY_RETRY); } else { // If edid is the same, then discard new sink and revert back to original sink if (same_edid) { @@ -1047,6 +1068,23 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) dc_sink_release(prev_sink); return true; + +} + +bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) +{ + const struct dc *dc = link->dc; + bool ret; + + /* get out of low power state */ + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); + + ret = dc_link_detect_helper(link, reason); + + /* Go back to power optimized state */ + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); + + return ret; } bool dc_link_get_hpd_state(struct dc_link *dc_link) @@ -1492,7 +1530,7 @@ static enum dc_status enable_link_dp( pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - if (!apply_seamless_boot_optimization) + if (state->clk_mgr && !apply_seamless_boot_optimization) state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); dp_enable_link_phy( @@ -2169,8 +2207,10 @@ static void disable_link(struct dc_link *link, enum signal_type signal) dp_set_fec_ready(link, false); } #endif - } else - 
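/*
 * Editor's illustration (not part of the patch): dc_link_detect() above is
 * reduced to a bracketing wrapper - leave the optimized power state, run the
 * real detection helper, then return to the optimized state regardless of the
 * result. The names below (power_bracket_*, do_detect) are hypothetical
 * stand-ins for clk_mgr_exit_optimized_pwr_state()/clk_mgr_optimize_pwr_state()
 * and dc_link_detect_helper().
 */
#include <stdbool.h>

struct sketch_dev;

static void power_bracket_exit(struct sketch_dev *dev) { /* wake hardware */ }
static void power_bracket_enter(struct sketch_dev *dev) { /* re-optimize */ }
static bool do_detect(struct sketch_dev *dev) { return true; }

static bool detect_with_power_bracket(struct sketch_dev *dev)
{
	bool ret;

	power_bracket_exit(dev);	/* get out of low power state */
	ret = do_detect(dev);		/* the actual detection work */
	power_bracket_enter(dev);	/* go back to power optimized state */

	return ret;
}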
link->link_enc->funcs->disable_output(link->link_enc, signal); + } else { + if (signal != SIGNAL_TYPE_VIRTUAL) + link->link_enc->funcs->disable_output(link->link_enc, signal); + } if (signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { /* MST disable link only when no stream use the link */ @@ -2217,7 +2257,7 @@ static bool dp_active_dongle_validate_timing( break; } - if (dongle_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || + if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || dongle_caps->extendedCapValid == false) return true; @@ -2381,17 +2421,206 @@ bool dc_link_set_abm_disable(const struct dc_link *link) return true; } -bool dc_link_set_psr_enable(const struct dc_link *link, bool enable, bool wait) +bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool wait) { struct dc *core_dc = link->ctx->dc; struct dmcu *dmcu = core_dc->res_pool->dmcu; - if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_enabled) - dmcu->funcs->set_psr_enable(dmcu, enable, wait); + + + if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled) + dmcu->funcs->set_psr_enable(dmcu, allow_active, wait); + + link->psr_allow_active = allow_active; return true; } +bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state) +{ + struct dc *core_dc = link->ctx->dc; + struct dmcu *dmcu = core_dc->res_pool->dmcu; + + if (dmcu != NULL && link->psr_feature_enabled) + dmcu->funcs->get_psr_state(dmcu, psr_state); + + return true; +} + +static inline enum physical_phy_id +transmitter_to_phy_id(enum transmitter transmitter_value) +{ + switch (transmitter_value) { + case TRANSMITTER_UNIPHY_A: + return PHYLD_0; + case TRANSMITTER_UNIPHY_B: + return PHYLD_1; + case TRANSMITTER_UNIPHY_C: + return PHYLD_2; + case TRANSMITTER_UNIPHY_D: + return PHYLD_3; + case TRANSMITTER_UNIPHY_E: + return PHYLD_4; + case TRANSMITTER_UNIPHY_F: + return PHYLD_5; + case TRANSMITTER_NUTMEG_CRT: + return PHYLD_6; + case TRANSMITTER_TRAVIS_CRT: + return PHYLD_7; + case TRANSMITTER_TRAVIS_LCD: + return PHYLD_8; + case TRANSMITTER_UNIPHY_G: + return PHYLD_9; + case TRANSMITTER_COUNT: + return PHYLD_COUNT; + case TRANSMITTER_UNKNOWN: + return PHYLD_UNKNOWN; + default: + WARN_ONCE(1, "Unknown transmitter value %d\n", + transmitter_value); + return PHYLD_UNKNOWN; + } +} + +bool dc_link_setup_psr(struct dc_link *link, + const struct dc_stream_state *stream, struct psr_config *psr_config, + struct psr_context *psr_context) +{ + struct dc *core_dc; + struct dmcu *dmcu; + int i; + /* updateSinkPsrDpcdConfig*/ + union dpcd_psr_configuration psr_configuration; + + psr_context->controllerId = CONTROLLER_ID_UNDEFINED; + + if (!link) + return false; + + core_dc = link->ctx->dc; + dmcu = core_dc->res_pool->dmcu; + + if (!dmcu) + return false; + + + memset(&psr_configuration, 0, sizeof(psr_configuration)); + + psr_configuration.bits.ENABLE = 1; + psr_configuration.bits.CRC_VERIFICATION = 1; + psr_configuration.bits.FRAME_CAPTURE_INDICATION = + psr_config->psr_frame_capture_indication_req; + + /* Check for PSR v2*/ + if (psr_config->psr_version == 0x2) { + /* For PSR v2 selective update. + * Indicates whether sink should start capturing + * immediately following active scan line, + * or starting with the 2nd active scan line. + */ + psr_configuration.bits.LINE_CAPTURE_INDICATION = 0; + /*For PSR v2, determines whether Sink should generate + * IRQ_HPD when CRC mismatch is detected. 
+ */ + psr_configuration.bits.IRQ_HPD_WITH_CRC_ERROR = 1; + } + + dm_helpers_dp_write_dpcd( + link->ctx, + link, + 368, + &psr_configuration.raw, + sizeof(psr_configuration.raw)); + + psr_context->channel = link->ddc->ddc_pin->hw_info.ddc_channel; + psr_context->transmitterId = link->link_enc->transmitter; + psr_context->engineId = link->link_enc->preferred_engine; + + for (i = 0; i < MAX_PIPES; i++) { + if (core_dc->current_state->res_ctx.pipe_ctx[i].stream + == stream) { + /* dmcu -1 for all controller id values, + * therefore +1 here + */ + psr_context->controllerId = + core_dc->current_state->res_ctx. + pipe_ctx[i].stream_res.tg->inst + 1; + break; + } + } + + /* Hardcoded for now. Can be Pcie or Uniphy (or Unknown)*/ + psr_context->phyType = PHY_TYPE_UNIPHY; + /*PhyId is associated with the transmitter id*/ + psr_context->smuPhyId = + transmitter_to_phy_id(link->link_enc->transmitter); + + psr_context->crtcTimingVerticalTotal = stream->timing.v_total; + psr_context->vsyncRateHz = div64_u64(div64_u64((stream-> + timing.pix_clk_100hz * 100), + stream->timing.v_total), + stream->timing.h_total); + + psr_context->psrSupportedDisplayConfig = true; + psr_context->psrExitLinkTrainingRequired = + psr_config->psr_exit_link_training_required; + psr_context->sdpTransmitLineNumDeadline = + psr_config->psr_sdp_transmit_line_num_deadline; + psr_context->psrFrameCaptureIndicationReq = + psr_config->psr_frame_capture_indication_req; + + psr_context->skipPsrWaitForPllLock = 0; /* only = 1 in KV */ + + psr_context->numberOfControllers = + link->dc->res_pool->timing_generator_count; + + psr_context->rfb_update_auto_en = true; + + /* 2 frames before enter PSR. */ + psr_context->timehyst_frames = 2; + /* half a frame + * (units in 100 lines, i.e. a value of 1 represents 100 lines) + */ + psr_context->hyst_lines = stream->timing.v_total / 2 / 100; + psr_context->aux_repeats = 10; + + psr_context->psr_level.u32all = 0; + +#if defined(CONFIG_DRM_AMD_DC_DCN1_0) + /*skip power down the single pipe since it blocks the cstate*/ + if (ASICREV_IS_RAVEN(link->ctx->asic_id.hw_internal_rev)) + psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true; +#endif + + /* SMU will perform additional powerdown sequence. + * For unsupported ASICs, set psr_level flag to skip PSR + * static screen notification to SMU. 
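/*
 * Editor's illustration (not part of the patch): the vsyncRateHz value above
 * is derived from the pixel clock (stored in 100 Hz units) and the total
 * timing, i.e. refresh = pixel_clock_hz / (h_total * v_total), using 64-bit
 * division to avoid overflow. Example: a 148.5 MHz pixel clock with
 * h_total = 2200 and v_total = 1125 gives 148500000 / 1125 / 2200 = 60 Hz.
 * The sketch assumes non-zero totals.
 */
#include <stdint.h>

static uint64_t sketch_vsync_rate_hz(uint32_t pix_clk_100hz,
				     uint32_t h_total, uint32_t v_total)
{
	uint64_t pix_clk_hz = (uint64_t)pix_clk_100hz * 100;

	/* divide by v_total first, then h_total, mirroring the code above */
	return pix_clk_hz / v_total / h_total;
}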
+ * (Always set for DAL2, did not check ASIC) + */ + psr_context->allow_smu_optimizations = psr_config->allow_smu_optimizations; + + /* Complete PSR entry before aborting to prevent intermittent + * freezes on certain eDPs + */ + psr_context->psr_level.bits.DISABLE_PSR_ENTRY_ABORT = 1; + + /* Controls additional delay after remote frame capture before + * continuing power down, default = 0 + */ + psr_context->frame_delay = 0; + + link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context); + + /* psr_enabled == 0 indicates setup_psr did not succeed, but this + * should not happen since firmware should be running at this point + */ + if (link->psr_feature_enabled == 0) + ASSERT(0); + + return true; + +} + const struct dc_link_status *dc_link_get_status(const struct dc_link *link) { return &link->link_status; @@ -2510,7 +2739,7 @@ static void update_mst_stream_alloc_table( /* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table * because stream_encoder is not exposed to dm */ -static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) +enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) { struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; @@ -2521,6 +2750,7 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; uint8_t i; + enum act_return_status ret; DC_LOGGER_INIT(link->ctx->logger); /* enable_link_dp_mst already check link->enabled_stream_count @@ -2568,14 +2798,16 @@ static enum dc_status allocate_mst_payload(struct pipe_ctx *pipe_ctx) &link->mst_stream_alloc_table); /* send down message */ - dm_helpers_dp_mst_poll_for_allocation_change_trigger( + ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( stream->ctx, stream); - dm_helpers_dp_mst_send_payload_allocation( - stream->ctx, - stream, - true); + if (ret != ACT_LINK_LOST) { + dm_helpers_dp_mst_send_payload_allocation( + stream->ctx, + stream, + true); + } /* slot X.Y for only current stream */ pbn_per_slot = get_pbn_per_slot(stream); @@ -2667,6 +2899,24 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) return DC_OK; } +#if defined(CONFIG_DRM_AMD_DC_HDCP) +static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) +{ + struct cp_psp *cp_psp = &pipe_ctx->stream->ctx->cp_psp; + if (cp_psp && cp_psp->funcs.update_stream_config) { + struct cp_psp_stream_config config; + + memset(&config, 0, sizeof(config)); + + config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst; + config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id; + config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst; + config.dpms_off = dpms_off; + config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context; + cp_psp->funcs.update_stream_config(cp_psp->handle, &config); + } +} +#endif void core_link_enable_stream( struct dc_state *state, @@ -2677,6 +2927,10 @@ void core_link_enable_stream( enum dc_status status; DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); + if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && + dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; + if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) { stream->link->link_enc->funcs->setup( stream->link->link_enc, @@ -2727,6 +2981,9 @@ void core_link_enable_stream( /* Do not touch link on seamless boot optimization. 
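/*
 * Editor's illustration (not part of the patch): update_psp_stream_config()
 * above fills a small config struct and forwards it to an optional callback
 * that the display manager may have registered (cp_psp.funcs). The sketch
 * below shows the same guarded-callback pattern with hypothetical names; it
 * is not the real cp_psp interface.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

struct sketch_stream_config {
	uint8_t otg_inst;
	uint8_t stream_enc_inst;
	bool dpms_off;
};

struct sketch_cp {
	void *handle;
	void (*update_stream_config)(void *handle,
				     const struct sketch_stream_config *cfg);
};

static void sketch_notify_psp(struct sketch_cp *cp, uint8_t otg,
			      uint8_t enc, bool dpms_off)
{
	struct sketch_stream_config cfg;

	if (!cp || !cp->update_stream_config)
		return;		/* callback is optional */

	memset(&cfg, 0, sizeof(cfg));
	cfg.otg_inst = otg;
	cfg.stream_enc_inst = enc;
	cfg.dpms_off = dpms_off;
	cp->update_stream_config(cp->handle, &cfg);
}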
*/ if (pipe_ctx->stream->apply_seamless_boot_optimization) { pipe_ctx->stream->dpms_off = false; +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif return; } @@ -2734,6 +2991,9 @@ void core_link_enable_stream( if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP && apply_edp_fast_boot_optimization) { pipe_ctx->stream->dpms_off = false; +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif return; } @@ -2786,13 +3046,16 @@ void core_link_enable_stream( #endif if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) - allocate_mst_payload(pipe_ctx); + dc_link_allocate_mst_payload(pipe_ctx); core_dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); if (dc_is_dp_signal(pipe_ctx->stream->signal)) enable_stream_features(pipe_ctx); +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, false); +#endif } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT else { // if (IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment)) @@ -2810,6 +3073,14 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; + if (!IS_FPGA_MAXIMUS_DC(core_dc->ctx->dce_environment) && + dc_is_virtual_signal(pipe_ctx->stream->signal)) + return; + +#if defined(CONFIG_DRM_AMD_DC_HDCP) + update_psp_stream_config(pipe_ctx, true); +#endif + core_dc->hwss.blank_stream(pipe_ctx); if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c index 51991bf26a93..7f904d55c1bc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c @@ -508,7 +508,7 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size) { - bool ret; + bool ret = false; uint32_t payload_size = dal_ddc_service_is_in_aux_transaction_mode(ddc) ? 
DEFAULT_AUX_MAX_DATA_SIZE : EDID_SEGMENT_SIZE; @@ -527,34 +527,32 @@ bool dal_ddc_service_query_ddc_data( /*TODO: len of payload data for i2c and aux is uint8!!!!, * but we want to read 256 over i2c!!!!*/ if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) { - struct aux_payload write_payload = { - .i2c_over_aux = true, - .write = true, - .mot = true, - .address = address, - .length = write_size, - .data = write_buf, - .reply = NULL, - .defer_delay = get_defer_delay(ddc), - }; - - struct aux_payload read_payload = { - .i2c_over_aux = true, - .write = false, - .mot = false, - .address = address, - .length = read_size, - .data = read_buf, - .reply = NULL, - .defer_delay = get_defer_delay(ddc), - }; - - ret = dc_link_aux_transfer_with_retries(ddc, &write_payload); + struct aux_payload payload; + bool read_available = true; + + payload.i2c_over_aux = true; + payload.address = address; + payload.reply = NULL; + payload.defer_delay = get_defer_delay(ddc); + + if (write_size != 0) { + payload.write = true; + payload.mot = false; + payload.length = write_size; + payload.data = write_buf; + + ret = dal_ddc_submit_aux_command(ddc, &payload); + read_available = ret; + } - if (!ret) - return false; + if (read_size != 0 && read_available) { + payload.write = false; + payload.mot = false; + payload.length = read_size; + payload.data = read_buf; - ret = dc_link_aux_transfer_with_retries(ddc, &read_payload); + ret = dal_ddc_submit_aux_command(ddc, &payload); + } } else { struct i2c_payloads *payloads = dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num); @@ -585,6 +583,41 @@ bool dal_ddc_service_query_ddc_data( return ret; } +bool dal_ddc_submit_aux_command(struct ddc_service *ddc, + struct aux_payload *payload) +{ + uint8_t retrieved = 0; + bool ret = 0; + + if (!ddc) + return false; + + if (!payload) + return false; + + do { + struct aux_payload current_payload; + bool is_end_of_payload = (retrieved + DEFAULT_AUX_MAX_DATA_SIZE) > + payload->length ? true : false; + + current_payload.address = payload->address; + current_payload.data = &payload->data[retrieved]; + current_payload.defer_delay = payload->defer_delay; + current_payload.i2c_over_aux = payload->i2c_over_aux; + current_payload.length = is_end_of_payload ? + payload->length - retrieved : DEFAULT_AUX_MAX_DATA_SIZE; + current_payload.mot = !is_end_of_payload; + current_payload.reply = payload->reply; + current_payload.write = payload->write; + + ret = dc_link_aux_transfer_with_retries(ddc, &current_payload); + + retrieved += current_payload.length; + } while (retrieved < payload->length && ret == true); + + return ret; +} + /* dc_link_aux_transfer_raw() - Attempt to transfer * the given aux payload. This function does not perform * retries or handle error states.
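/*
 * Editor's illustration (not part of the patch): dal_ddc_submit_aux_command()
 * above splits a long I2C-over-AUX payload into chunks of at most
 * DEFAULT_AUX_MAX_DATA_SIZE bytes and keeps the MOT (middle-of-transaction)
 * bit set on every chunk except the last one. The sketch below shows only the
 * chunking arithmetic; send_chunk() is a hypothetical stand-in for
 * dc_link_aux_transfer_with_retries(), and the chunk size here is assumed for
 * illustration.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define SKETCH_AUX_MAX_DATA 16	/* assumed chunk size, for illustration */

static bool send_chunk(const uint8_t *data, size_t len, bool mot)
{
	/* one AUX transaction would be programmed here */
	return true;
}

static bool sketch_submit_aux(const uint8_t *data, size_t length)
{
	size_t sent = 0;
	bool ok = true;

	while (ok && sent < length) {
		bool last = (length - sent) <= SKETCH_AUX_MAX_DATA;
		size_t len = last ? length - sent : SKETCH_AUX_MAX_DATA;

		ok = send_chunk(&data[sent], len, !last /* MOT off on last */);
		sent += len;
	}
	return ok;
}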
The reply is returned @@ -613,6 +646,20 @@ bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, return dce_aux_transfer_with_retries(ddc, payload); } + +enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc, + uint32_t timeout) +{ + enum dc_status status = DC_OK; + struct ddc *ddc_pin = ddc->ddc_pin; + + if (ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout == NULL) + return DC_ERROR_UNEXPECTED; + if (!ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]->funcs->configure_timeout(ddc, timeout)) + status = DC_ERROR_UNEXPECTED; + return status; +} + /*test only function*/ void dal_ddc_service_set_ddc_pin( struct ddc_service *ddc_service, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index f5742719b5d9..0f59b68aa4c2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1409,6 +1409,9 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link) if (link->link_enc->features.flags.bits.IS_HBR3_CAPABLE) max_link_cap.link_rate = LINK_RATE_HIGH3; + if (link->link_enc->funcs->get_max_link_cap) + link->link_enc->funcs->get_max_link_cap(link->link_enc, &max_link_cap); + /* Lower link settings based on sink's link cap */ if (link->reported_link_cap.lane_count < max_link_cap.lane_count) max_link_cap.lane_count = @@ -1653,11 +1656,14 @@ bool dp_verify_link_cap_with_retries( for (i = 0; i < attempts; i++) { int fail_count = 0; - enum dc_connection_type type; + enum dc_connection_type type = dc_connection_none; memset(&link->verified_link_cap, 0, sizeof(struct dc_link_settings)); - if (!dc_link_detect_sink(link, &type)) { + if (!dc_link_detect_sink(link, &type) || type == dc_connection_none) { + link->verified_link_cap.lane_count = LANE_COUNT_ONE; + link->verified_link_cap.link_rate = LINK_RATE_LOW; + link->verified_link_cap.link_spread = LINK_SPREAD_DISABLED; break; } else if (dp_verify_link_cap(link, &link->reported_link_cap, @@ -1670,6 +1676,19 @@ bool dp_verify_link_cap_with_retries( return success; } +bool dp_verify_mst_link_cap( + struct dc_link *link) +{ + struct dc_link_settings max_link_cap = {0}; + + max_link_cap = get_max_link_cap(link); + link->verified_link_cap = get_common_supported_link_settings( + link->reported_link_cap, + max_link_cap); + + return true; +} + static struct dc_link_settings get_common_supported_link_settings( struct dc_link_settings link_setting_a, struct dc_link_settings link_setting_b) @@ -2057,11 +2076,11 @@ static bool allow_hpd_rx_irq(const struct dc_link *link) return false; } -static bool handle_hpd_irq_psr_sink(const struct dc_link *link) +static bool handle_hpd_irq_psr_sink(struct dc_link *link) { union dpcd_psr_configuration psr_configuration; - if (!link->psr_enabled) + if (!link->psr_feature_enabled) return false; dm_helpers_dp_read_dpcd( @@ -2100,8 +2119,8 @@ static bool handle_hpd_irq_psr_sink(const struct dc_link *link) sizeof(psr_error_status.raw)); /* PSR error, disable and re-enable PSR */ - dc_link_set_psr_enable(link, false, true); - dc_link_set_psr_enable(link, true, true); + dc_link_set_psr_allow_active(link, false, true); + dc_link_set_psr_allow_active(link, true, true); return true; } else if (psr_sink_psr_status.bits.SINK_SELF_REFRESH_STATUS == @@ -2364,6 +2383,8 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd enum dc_status result; bool status = false; + struct pipe_ctx *pipe_ctx; + int i; if (out_link_loss) 
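/*
 * Editor's illustration (not part of the patch): for MST, dp_verify_mst_link_cap()
 * above takes the verified link capability as the common subset of the
 * sink-reported capability and the encoder's maximum, instead of probing with
 * link training. The sketch treats lane count and link rate as plain integers
 * and picks the smaller of each; the real get_common_supported_link_settings()
 * operates on dc's enum-typed settings.
 */
#include <stdint.h>

struct sketch_link_settings {
	uint32_t lane_count;
	uint32_t link_rate;
};

static struct sketch_link_settings
sketch_common_link_settings(struct sketch_link_settings a,
			    struct sketch_link_settings b)
{
	struct sketch_link_settings out;

	out.lane_count = a.lane_count < b.lane_count ? a.lane_count
						     : b.lane_count;
	out.link_rate = a.link_rate < b.link_rate ? a.link_rate : b.link_rate;
	return out;
}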
*out_link_loss = false; @@ -2440,6 +2461,15 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd &link->cur_link_settings, true, LINK_TRAINING_ATTEMPTS); + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link && + pipe_ctx->stream->dpms_off == false && + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + dc_link_allocate_mst_payload(pipe_ctx); + } + } + status = false; if (out_link_loss) *out_link_loss = true; @@ -2545,6 +2575,7 @@ static void get_active_converter_info( uint8_t data, struct dc_link *link) { union dp_downstream_port_present ds_port = { .byte = data }; + memset(&link->dpcd_caps.dongle_caps, 0, sizeof(link->dpcd_caps.dongle_caps)); /* decode converter info*/ if (!ds_port.fields.PORT_PRESENT) { @@ -2691,6 +2722,7 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, * keep receiver powered all the time.*/ case DP_BRANCH_DEVICE_ID_0010FA: case DP_BRANCH_DEVICE_ID_0080E1: + case DP_BRANCH_DEVICE_ID_00E04C: link->wa_flags.dp_keep_receiver_powered = true; break; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 79438c4f1e20..a519dbc5ecb6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -277,7 +277,8 @@ void dp_retrain_link_dp_test(struct dc_link *link, if (pipes[i].stream != NULL && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe && pipes[i].stream->link != NULL && - pipes[i].stream_res.stream_enc != NULL) { + pipes[i].stream_res.stream_enc != NULL && + pipes[i].stream->link == link) { udelay(100); pipes[i].stream_res.stream_enc->funcs->dp_blank( diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index f25ac17f47fa..37698305a2dc 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -951,7 +951,7 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx) data->inits.v_c_bot = dc_fixpt_add(data->inits.v_c, data->ratios.vert_c); } -static bool are_rect_integer_multiples(struct rect src, struct rect dest) +static bool are_rects_integer_multiples(struct rect src, struct rect dest) { if (dest.width >= src.width && dest.width % src.width == 0 && dest.height >= src.height && dest.height % src.height == 0) @@ -959,6 +959,38 @@ static bool are_rect_integer_multiples(struct rect src, struct rect dest) return false; } + +static void calculate_integer_scaling(struct pipe_ctx *pipe_ctx) +{ + if (!pipe_ctx->plane_state->scaling_quality.integer_scaling) + return; + + //for Centered Mode + if (pipe_ctx->stream->dst.width == pipe_ctx->stream->src.width && + pipe_ctx->stream->dst.height == pipe_ctx->stream->src.height) { + // calculate maximum # of replication of src onto addressable + unsigned int integer_multiple = min( + pipe_ctx->stream->timing.h_addressable / pipe_ctx->stream->src.width, + pipe_ctx->stream->timing.v_addressable / pipe_ctx->stream->src.height); + + //scale dst + pipe_ctx->stream->dst.width = integer_multiple * pipe_ctx->stream->src.width; + pipe_ctx->stream->dst.height = integer_multiple * pipe_ctx->stream->src.height; + + //center dst onto addressable + pipe_ctx->stream->dst.x = (pipe_ctx->stream->timing.h_addressable - pipe_ctx->stream->dst.width)/2; + pipe_ctx->stream->dst.y = (pipe_ctx->stream->timing.v_addressable - 
pipe_ctx->stream->dst.height)/2; + } + + //disable taps if src & dst are integer ratio + if (are_rects_integer_multiples(pipe_ctx->stream->src, pipe_ctx->stream->dst)) { + pipe_ctx->plane_state->scaling_quality.v_taps = 1; + pipe_ctx->plane_state->scaling_quality.h_taps = 1; + pipe_ctx->plane_state->scaling_quality.v_taps_c = 1; + pipe_ctx->plane_state->scaling_quality.h_taps_c = 1; + } +} + bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) { const struct dc_plane_state *plane_state = pipe_ctx->plane_state; @@ -972,6 +1004,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) pipe_ctx->plane_res.scl_data.format = convert_pixel_format_to_dalsurface( pipe_ctx->plane_state->format); + calculate_integer_scaling(pipe_ctx); + calculate_scaling_ratios(pipe_ctx); calculate_viewport(pipe_ctx); @@ -1002,13 +1036,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx) res = pipe_ctx->plane_res.dpp->funcs->dpp_get_optimal_number_of_taps( pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data, &plane_state->scaling_quality); - if (res && - plane_state->scaling_quality.integer_scaling && - are_rect_integer_multiples(pipe_ctx->plane_res.scl_data.viewport, - pipe_ctx->plane_res.scl_data.recout)) { - pipe_ctx->plane_res.scl_data.taps.v_taps = 1; - pipe_ctx->plane_res.scl_data.taps.h_taps = 1; - } if (!res) { /* Try 24 bpp linebuffer */ @@ -1635,7 +1662,8 @@ static int acquire_first_free_pipe( static struct audio *find_first_free_audio( struct resource_context *res_ctx, const struct resource_pool *pool, - enum engine_id id) + enum engine_id id, + enum dce_version dc_version) { int i, available_audio_count; @@ -1854,28 +1882,28 @@ static int acquire_resource_from_hw_enabled_state( struct dc_stream_state *stream) { struct dc_link *link = stream->link; - unsigned int inst, tg_inst; + unsigned int i, inst, tg_inst = 0; /* Check for enabled DIG to identify enabled display */ if (!link->link_enc->funcs->is_dig_enabled(link->link_enc)) return -1; - /* Check for which front end is used by this encoder. - * Note the inst is 1 indexed, where 0 is undefined. - * Note that DIG_FE can source from different OTG but our - * current implementation always map 1-to-1, so this code makes - * the same assumption and doesn't check OTG source. - */ inst = link->link_enc->funcs->get_dig_frontend(link->link_enc); - /* Instance should be within the range of the pool */ - if (inst >= pool->pipe_count) - return -1; + if (inst == ENGINE_ID_UNKNOWN) + return false; - if (inst >= pool->stream_enc_count) - return -1; + for (i = 0; i < pool->stream_enc_count; i++) { + if (pool->stream_enc[i]->id == inst) { + tg_inst = pool->stream_enc[i]->funcs->dig_source_otg( + pool->stream_enc[i]); + break; + } + } - tg_inst = pool->stream_enc[inst]->funcs->dig_source_otg(pool->stream_enc[inst]); + // tg_inst not found + if (i == pool->stream_enc_count) + return false; if (tg_inst >= pool->timing_generator_count) return false; @@ -1971,7 +1999,7 @@ enum dc_status resource_map_pool_resources( dc_is_audio_capable_signal(pipe_ctx->stream->signal) && stream->audio_info.mode_count && stream->audio_info.flags.all) { pipe_ctx->stream_res.audio = find_first_free_audio( - &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id); + &context->res_ctx, pool, pipe_ctx->stream_res.stream_enc->id, dc_ctx->dce_version); /* * Audio assigned in order first come first get. 
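/*
 * Editor's illustration (not part of the patch): in its centered-mode branch,
 * calculate_integer_scaling() above replicates the source an integer number
 * of times, where the factor is the largest multiple that still fits the
 * addressable area, and then centers the result. Example: a 640x480 source on
 * a 2560x1440 timing gives min(2560/640, 1440/480) = min(4, 3) = 3, so the
 * destination becomes 1920x1440 offset by ((2560-1920)/2, 0) = (320, 0).
 */
struct sketch_rect {
	int x, y, width, height;
};

static struct sketch_rect sketch_integer_scale(int src_w, int src_h,
					       int addr_w, int addr_h)
{
	int mul_w = addr_w / src_w;
	int mul_h = addr_h / src_h;
	int mul = mul_w < mul_h ? mul_w : mul_h; /* largest factor that fits */
	struct sketch_rect dst;

	dst.width = mul * src_w;
	dst.height = mul * src_h;
	dst.x = (addr_w - dst.width) / 2;	/* center horizontally */
	dst.y = (addr_h - dst.height) / 2;	/* center vertically */
	return dst;
}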
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c index bf1d7bb90e0f..bb09243758fe 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c @@ -423,10 +423,10 @@ bool dc_stream_add_writeback(struct dc *dc, if (dwb->funcs->is_enabled(dwb)) { /* writeback pipe already enabled, only need to update */ - dc->hwss.update_writeback(dc, stream_status, wb_info); + dc->hwss.update_writeback(dc, stream_status, wb_info, dc->current_state); } else { /* Enable writeback pipe from scratch*/ - dc->hwss.enable_writeback(dc, stream_status, wb_info); + dc->hwss.enable_writeback(dc, stream_status, wb_info, dc->current_state); } } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index a82352a87808..0416a17b0897 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -39,7 +39,7 @@ #include "inc/hw/dmcu.h" #include "dml/display_mode_lib.h" -#define DC_VER "3.2.48" +#define DC_VER "3.2.56" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -111,19 +111,20 @@ struct dc_caps { bool force_dp_tps4_for_cp2520; bool disable_dp_clk_share; bool psp_setup_panel_mode; + bool extended_aux_timeout_support; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 bool hw_3d_lut; #endif struct dc_plane_cap planes[MAX_PLANES]; }; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_bug_wa { +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool no_connect_phy_config; bool dedcn20_305_wa; +#endif bool skip_clock_update; }; -#endif struct dc_dcc_surface_param { struct dc_size surface_size; @@ -219,7 +220,9 @@ struct dc_config { bool allow_seamless_boot_optimization; bool power_down_display_on_boot; bool edp_not_connected; + bool force_enum_edp; bool forced_clocks; + bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well bool multi_mon_pp_mclk_switch; }; @@ -227,6 +230,7 @@ enum visual_confirm { VISUAL_CONFIRM_DISABLE = 0, VISUAL_CONFIRM_SURFACE = 1, VISUAL_CONFIRM_HDR = 2, + VISUAL_CONFIRM_MPCTREE = 4, }; enum dcc_option { @@ -245,6 +249,19 @@ enum wm_report_mode { WM_REPORT_DEFAULT = 0, WM_REPORT_OVERRIDE = 1, }; +enum dtm_pstate{ + dtm_level_p0 = 0,/*highest voltage*/ + dtm_level_p1, + dtm_level_p2, + dtm_level_p3, + dtm_level_p4,/*when active_display_count = 0*/ +}; + +enum dcn_pwr_state { + DCN_PWR_STATE_UNKNOWN = -1, + DCN_PWR_STATE_MISSION_MODE = 0, + DCN_PWR_STATE_LOW_POWER = 3, +}; /* * For any clocks that may differ per pipe @@ -252,11 +269,7 @@ enum wm_report_mode { */ struct dc_clocks { int dispclk_khz; - int max_supported_dppclk_khz; - int max_supported_dispclk_khz; int dppclk_khz; - int bw_dppclk_khz; /*a copy of dppclk_khz*/ - int bw_dispclk_khz; int dcfclk_khz; int socclk_khz; int dcfclk_deep_sleep_khz; @@ -264,12 +277,17 @@ struct dc_clocks { int phyclk_khz; int dramclk_khz; bool p_state_change_support; - + enum dcn_pwr_state pwr_state; /* * Elements below are not compared for the purposes of * optimization required */ bool prev_p_state_change_support; + enum dtm_pstate dtm_level; + int max_supported_dppclk_khz; + int max_supported_dispclk_khz; + int bw_dppclk_khz; /*a copy of dppclk_khz*/ + int bw_dispclk_khz; }; struct dc_bw_validation_profile { @@ -347,6 +365,7 @@ struct dc_debug_options { bool disable_hubp_power_gate; #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool disable_dsc_power_gate; + int dsc_min_slice_height_override; #endif bool disable_pplib_wm_range; enum wm_report_mode pplib_wm_report_mode; @@ -462,9 +481,7 @@ 
struct dc { struct dc_config config; struct dc_debug_options debug; struct dc_bounding_box_overrides bb_overrides; -#if defined(CONFIG_DRM_AMD_DC_DCN2_0) struct dc_bug_wa work_arounds; -#endif struct dc_context *ctx; #ifdef CONFIG_DRM_AMD_DC_DCN2_0 struct dc_phy_addr_space_config vm_pa_config; @@ -553,10 +570,16 @@ struct dc_init_data { }; struct dc_callback_init { +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct cp_psp cp_psp; +#else uint8_t reserved; +#endif }; struct dc *dc_create(const struct dc_init_data *init_params); +void dc_hardware_init(struct dc *dc); + int dc_get_vmid_use_vector(struct dc *dc); #ifdef CONFIG_DRM_AMD_DC_DCN2_0 void dc_setup_vm_context(struct dc *dc, struct dc_virtual_addr_space_config *va_config, int vmid); @@ -565,6 +588,7 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c #endif void dc_init_callbacks(struct dc *dc, const struct dc_callback_init *init_params); +void dc_deinit_callbacks(struct dc *dc); void dc_destroy(struct dc **dc); /******************************************************************************* diff --git a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h index 4ef97f65e55d..4f8f576d5fcf 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_ddc_types.h @@ -49,7 +49,8 @@ enum aux_channel_operation_result { AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN, AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY, AUX_CHANNEL_OPERATION_FAILED_TIMEOUT, - AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON + AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON, + AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_dsc.h b/drivers/gpu/drm/amd/display/dc/dc_dsc.h index 6e42209f0e20..0ed2962add5a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dsc.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dsc.h @@ -30,6 +30,7 @@ #define DP_DSC_BRANCH_OVERALL_THROUGHPUT_0 0x0a0 /* DP 1.4a SCR */ #define DP_DSC_BRANCH_OVERALL_THROUGHPUT_1 0x0a1 #define DP_DSC_BRANCH_MAX_LINE_WIDTH 0x0a2 +#include "dc_types.h" struct dc_dsc_bw_range { uint32_t min_kbps; /* Bandwidth if min_target_bpp_x16 is used */ @@ -39,13 +40,21 @@ struct dc_dsc_bw_range { uint32_t stream_kbps; /* Uncompressed stream bandwidth */ }; +struct display_stream_compressor { + const struct dsc_funcs *funcs; +#ifndef AMD_EDID_UTILITY + struct dc_context *ctx; + int inst; +#endif +}; bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dpcd_dsc_ext_data, struct dsc_dec_dpcd_caps *dsc_sink_caps); bool dc_dsc_compute_bandwidth_range( - const struct dc *dc, + const struct display_stream_compressor *dsc, + const uint32_t dsc_min_slice_height_override, const uint32_t min_kbps, const uint32_t max_kbps, const struct dsc_dec_dpcd_caps *dsc_sink_caps, @@ -53,8 +62,9 @@ bool dc_dsc_compute_bandwidth_range( struct dc_dsc_bw_range *range); bool dc_dsc_compute_config( - const struct dc *dc, + const struct display_stream_compressor *dsc, const struct dsc_dec_dpcd_caps *dsc_sink_caps, + const uint32_t dsc_min_slice_height_override, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg); diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h index 0b8700a8a94a..e0856bb8511f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h @@ -26,6 +26,8 @@ #ifndef DC_HW_TYPES_H #define DC_HW_TYPES_H +#ifndef AMD_EDID_UTILITY + #include "os_types.h" 
#include "fixed31_32.h" #include "signal_types.h" @@ -124,20 +126,6 @@ struct plane_size { int chroma_pitch; struct rect surface_size; struct rect chroma_size; - - union { - struct { - struct rect surface_size; - int surface_pitch; - } grph; - - struct { - struct rect luma_size; - int luma_pitch; - struct rect chroma_size; - int chroma_pitch; - } video; - }; }; struct dc_plane_dcc_param { @@ -148,21 +136,6 @@ struct dc_plane_dcc_param { int meta_pitch_c; bool independent_64b_blks_c; - - union { - struct { - int meta_pitch; - bool independent_64b_blks; - } grph; - - struct { - int meta_pitch_l; - bool independent_64b_blks_l; - - int meta_pitch_c; - bool independent_64b_blks_c; - } video; - }; }; /*Displayable pixel format in fb*/ @@ -605,6 +578,11 @@ enum dc_quantization_range { QUANTIZATION_RANGE_LIMITED }; +enum dc_dynamic_expansion { + DYN_EXPANSION_AUTO, + DYN_EXPANSION_DISABLE +}; + /* XFM */ /* used in struct dc_plane_state */ @@ -616,6 +594,8 @@ struct scaling_taps { bool integer_scaling; }; +#endif /* AMD_EDID_UTILITY */ + enum dc_timing_standard { DC_TIMING_STANDARD_UNDEFINED, DC_TIMING_STANDARD_DMT, @@ -737,30 +717,6 @@ enum dc_timing_3d_format { TIMING_3D_FORMAT_MAX, }; -enum trigger_delay { - TRIGGER_DELAY_NEXT_PIXEL = 0, - TRIGGER_DELAY_NEXT_LINE, -}; - -enum crtc_event { - CRTC_EVENT_VSYNC_RISING = 0, - CRTC_EVENT_VSYNC_FALLING -}; - -struct crtc_trigger_info { - bool enabled; - struct dc_stream_state *event_source; - enum crtc_event event; - enum trigger_delay delay; -}; - -struct dc_crtc_timing_adjust { - uint32_t v_total_min; - uint32_t v_total_max; - uint32_t v_total_mid; - uint32_t v_total_mid_frame_num; -}; - #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT struct dc_dsc_config { uint32_t num_slices_h; /* Number of DSC slices - horizontal */ @@ -804,6 +760,33 @@ struct dc_crtc_timing { #endif }; +#ifndef AMD_EDID_UTILITY + +enum trigger_delay { + TRIGGER_DELAY_NEXT_PIXEL = 0, + TRIGGER_DELAY_NEXT_LINE, +}; + +enum crtc_event { + CRTC_EVENT_VSYNC_RISING = 0, + CRTC_EVENT_VSYNC_FALLING +}; + +struct crtc_trigger_info { + bool enabled; + struct dc_stream_state *event_source; + enum crtc_event event; + enum trigger_delay delay; +}; + +struct dc_crtc_timing_adjust { + uint32_t v_total_min; + uint32_t v_total_max; + uint32_t v_total_mid; + uint32_t v_total_mid_frame_num; +}; + + /* Passed on init */ enum vram_type { VIDEO_MEMORY_TYPE_GDDR5 = 2, @@ -874,5 +857,7 @@ struct tg_color { uint16_t color_b_cb; }; +#endif /* AMD_EDID_UTILITY */ + #endif /* DC_HW_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 9ea75db3484e..f24fd19ed93d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -126,7 +126,8 @@ struct dc_link { unsigned short chip_caps; unsigned int dpcd_sink_count; enum edp_revision edp_revision; - bool psr_enabled; + bool psr_feature_enabled; + bool psr_allow_active; /* MST record stream using this link */ struct link_flags { @@ -158,6 +159,18 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_ return dc->links[link_index]; } +static inline struct dc_link *get_edp_link(const struct dc *dc) +{ + int i; + + // report any eDP links, even unconnected DDI's + for (i = 0; i < dc->link_count; i++) { + if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) + return dc->links[i]; + } + return NULL; +} + /* Set backlight level of an embedded panel (eDP, LVDS). 
* backlight_pwm_u16_16 is unsigned 32 bit with 16 bit integer * and 16 bit fractional, where 1.0 is max backlight value. @@ -170,7 +183,7 @@ int dc_link_get_backlight_level(const struct dc_link *dc_link); bool dc_link_set_abm_disable(const struct dc_link *dc_link); -bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait); +bool dc_link_set_psr_allow_active(struct dc_link *dc_link, bool enable, bool wait); bool dc_link_get_psr_state(const struct dc_link *dc_link, uint32_t *psr_state); @@ -192,6 +205,7 @@ enum dc_detect_reason { bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); bool dc_link_get_hpd_state(struct dc_link *dc_link); +enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). * Return: diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index 0fa1c26bc20d..fdb6adc37857 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -113,6 +113,21 @@ struct periodic_interrupt_config { int lines_offset; }; +union stream_update_flags { + struct { + uint32_t scaling:1; + uint32_t out_tf:1; + uint32_t out_csc:1; + uint32_t abm_level:1; + uint32_t dpms_off:1; + uint32_t gamut_remap:1; +#if defined(CONFIG_DRM_AMD_DC_DCN2_0) + uint32_t wb_update:1; +#endif + } bits; + + uint32_t raw; +}; struct dc_stream_state { // sink is deprecated, new code should not reference @@ -214,9 +229,14 @@ struct dc_stream_state { #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT bool is_dsc_enabled; #endif + union stream_update_flags update_flags; }; +#define ABM_LEVEL_IMMEDIATE_DISABLE 0xFFFFFFFF + struct dc_stream_update { + struct dc_stream_state *stream; + struct rect src; struct rect dst; struct dc_transfer_func *out_transfer_func; @@ -431,6 +451,9 @@ void dc_stream_set_static_screen_events(struct dc *dc, int num_streams, const struct dc_static_screen_events *events); +void dc_stream_set_dyn_expansion(struct dc *dc, struct dc_stream_state *stream, + enum dc_dynamic_expansion option); + void dc_stream_set_dither_option(struct dc_stream_state *stream, enum dc_dither_option option); diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index b273735b6a3e..d9be8fc3889f 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -25,6 +25,11 @@ #ifndef DC_TYPES_H_ #define DC_TYPES_H_ +#ifndef AMD_EDID_UTILITY +/* AND EdidUtility only needs a portion + * of this file, including the rest only + * causes additional issues. 
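/*
 * Editor's illustration (not part of the patch): the stream_update_flags union
 * above pairs single-bit members with a raw view of the same storage, so
 * individual updates can be flagged one by one while "any flag set" and
 * "force a full update" stay cheap raw comparisons/assignments. Simplified
 * stand-in below; the real union carries more bits.
 */
#include <stdbool.h>
#include <stdint.h>

union sketch_update_flags {
	struct {
		uint32_t scaling:1;
		uint32_t out_tf:1;
		uint32_t dpms_off:1;
	} bits;
	uint32_t raw;
};

static bool sketch_needs_full_update(union sketch_update_flags *f,
				     bool scaling_changed, bool dpms_changed)
{
	f->raw = 0;				/* reset all flags */
	if (scaling_changed)
		f->bits.scaling = 1;
	if (dpms_changed)
		f->bits.dpms_off = 1;

	if (f->raw != 0) {			/* any bit set -> full update */
		f->raw = 0xFFFFFFFF;		/* mark everything dirty */
		return true;
	}
	return false;
}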
+ */ #include "os_types.h" #include "fixed31_32.h" #include "irq_types.h" @@ -33,6 +38,10 @@ #include "dal_types.h" #include "grph_object_defs.h" +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "dm_cp_psp.h" +#endif + /* forward declarations */ struct dc_plane_state; struct dc_stream_state; @@ -100,6 +109,9 @@ struct dc_context { uint32_t dc_sink_id_count; uint32_t dc_stream_id_count; uint64_t fbc_gpu_addr; +#ifdef CONFIG_DRM_AMD_DC_HDCP + struct cp_psp cp_psp; +#endif }; @@ -159,6 +171,12 @@ enum dc_edid_status { EDID_THE_SAME, }; +enum act_return_status { + ACT_SUCCESS, + ACT_LINK_LOST, + ACT_FAILED +}; + /* audio capability from EDID*/ struct dc_cea_audio_mode { uint8_t format_code; /* ucData[0] [6:3]*/ @@ -739,6 +757,9 @@ struct dc_clock_config { uint32_t current_clock_khz;/*current clock in use*/ }; +#endif /*AMD_EDID_UTILITY*/ +//AMD EDID UTILITY does not need any of the above structures + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* DSC DPCD capabilities */ union dsc_slice_caps1 { @@ -810,4 +831,5 @@ struct dsc_dec_dpcd_caps { uint32_t branch_max_line_width; }; #endif + #endif /* DC_TYPES_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c index 58bd131d5b48..b8a3fc505c9b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_abm.c @@ -77,6 +77,9 @@ static bool dce_abm_set_pipe(struct abm *abm, uint32_t controller_id) /* notifyDMCUMsg */ REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1); + REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, + 1, 80000); + return true; } @@ -401,6 +404,10 @@ static bool dce_abm_init_backlight(struct abm *abm) /* Enable the backlight output */ REG_UPDATE(BL_PWM_CNTL, BL_PWM_EN, 1); + /* Disable fractional pwm if configured */ + REG_UPDATE(BL_PWM_CNTL, BL_PWM_FRACTIONAL_EN, + abm->ctx->dc->config.disable_fractional_pwm ? 
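/*
 * Editor's illustration (not part of the patch): the REG_WAIT added to
 * dce_abm_set_pipe() above blocks until the DMCU acknowledges the command by
 * clearing MASTER_COMM_INTERRUPT, polling in small steps up to an upper
 * bound. The sketch below shows the generic poll-until-clear-with-timeout
 * shape; read_reg()/delay_us() are hypothetical stand-ins for the register
 * helpers.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t read_reg(void) { return 0; }	/* stand-in register read */
static void delay_us(unsigned int us) { }	/* stand-in busy wait */

static bool sketch_wait_bit_clear(uint32_t mask, unsigned int step_us,
				  unsigned int max_tries)
{
	unsigned int i;

	for (i = 0; i < max_tries; i++) {
		if ((read_reg() & mask) == 0)
			return true;		/* acknowledged */
		delay_us(step_us);
	}
	return false;				/* timed out */
}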
0 : 1); + /* Unlock group 2 backlight registers */ REG_UPDATE(BL_PWM_GRP1_REG_LOCK, BL_PWM_GRP1_REG_LOCK, 0); @@ -489,9 +496,6 @@ void dce_abm_destroy(struct abm **abm) { struct dce_abm *abm_dce = TO_DCE_ABM(*abm); - if (abm_dce->base.dmcu_is_running == true) - abm_dce->base.funcs->set_abm_immediate_disable(*abm); - kfree(abm_dce); *abm = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c index c3f9f4185ce8..e472608faf33 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c @@ -42,6 +42,10 @@ #include "reg_helper.h" +#undef FN +#define FN(reg_name, field_name) \ + aux110->shift->field_name, aux110->mask->field_name + #define FROM_AUX_ENGINE(ptr) \ container_of((ptr), struct aux_engine_dce110, base) @@ -55,6 +59,14 @@ enum { AUX_TIMED_OUT_RETRY_COUNTER = 2, AUX_DEFER_RETRY_COUNTER = 6 }; + +#define TIME_OUT_INCREMENT 1016 +#define TIME_OUT_MULTIPLIER_8 8 +#define TIME_OUT_MULTIPLIER_16 16 +#define TIME_OUT_MULTIPLIER_32 32 +#define TIME_OUT_MULTIPLIER_64 64 +#define MAX_TIMEOUT_LENGTH 127 + static void release_engine( struct dce_aux *engine) { @@ -198,7 +210,7 @@ static void submit_channel_request( REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1); REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0, - 10, aux110->timeout_period/10); + 10, aux110->polling_timeout_period/10); /* set the delay and the number of bytes to write */ @@ -327,7 +339,7 @@ static enum aux_channel_operation_result get_channel_status( /* poll to make sure that SW_DONE is asserted */ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1, - 10, aux110->timeout_period/10); + 10, aux110->polling_timeout_period/10); value = REG_READ(AUX_SW_STATUS); /* in case HPD is LOW, exit AUX transaction */ @@ -414,20 +426,77 @@ void dce110_engine_destroy(struct dce_aux **engine) *engine = NULL; } + +static bool dce_aux_configure_timeout(struct ddc_service *ddc, + uint32_t timeout_in_us) +{ + uint32_t multiplier = 0; + uint32_t length = 0; + struct ddc *ddc_pin = ddc->ddc_pin; + struct dce_aux *aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; + struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(aux_engine); + + /* 1-Update polling timeout period */ + aux110->polling_timeout_period = timeout_in_us * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER; + + /* 2-Update aux timeout period length and multiplier */ + if (timeout_in_us <= TIME_OUT_INCREMENT) { + multiplier = 0; + length = timeout_in_us/TIME_OUT_MULTIPLIER_8; + if (timeout_in_us % TIME_OUT_MULTIPLIER_8 != 0) + length++; + } else if (timeout_in_us <= 2 * TIME_OUT_INCREMENT) { + multiplier = 1; + length = timeout_in_us/TIME_OUT_MULTIPLIER_16; + if (timeout_in_us % TIME_OUT_MULTIPLIER_16 != 0) + length++; + } else if (timeout_in_us <= 4 * TIME_OUT_INCREMENT) { + multiplier = 2; + length = timeout_in_us/TIME_OUT_MULTIPLIER_32; + if (timeout_in_us % TIME_OUT_MULTIPLIER_32 != 0) + length++; + } else if (timeout_in_us > 4 * TIME_OUT_INCREMENT) { + multiplier = 3; + length = timeout_in_us/TIME_OUT_MULTIPLIER_64; + if (timeout_in_us % TIME_OUT_MULTIPLIER_64 != 0) + length++; + } + + length = (length < MAX_TIMEOUT_LENGTH) ? 
length : MAX_TIMEOUT_LENGTH; + + REG_UPDATE_SEQ_2(AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, length, AUX_RX_TIMEOUT_LEN_MUL, multiplier); + + return true; +} + +static struct dce_aux_funcs aux_functions = { + .configure_timeout = NULL, + .destroy = NULL, +}; + struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110, struct dc_context *ctx, uint32_t inst, uint32_t timeout_period, - const struct dce110_aux_registers *regs) + const struct dce110_aux_registers *regs, + const struct dce110_aux_registers_mask *mask, + const struct dce110_aux_registers_shift *shift, + bool is_ext_aux_timeout_configurable) { aux_engine110->base.ddc = NULL; aux_engine110->base.ctx = ctx; aux_engine110->base.delay = 0; aux_engine110->base.max_defer_write_retry = 0; aux_engine110->base.inst = inst; - aux_engine110->timeout_period = timeout_period; + aux_engine110->polling_timeout_period = timeout_period; aux_engine110->regs = regs; + aux_engine110->mask = mask; + aux_engine110->shift = shift; + aux_engine110->base.funcs = &aux_functions; + if (is_ext_aux_timeout_configurable) + aux_engine110->base.funcs->configure_timeout = &dce_aux_configure_timeout; + return &aux_engine110->base; } @@ -464,8 +533,10 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, memset(&aux_rep, 0, sizeof(aux_rep)); aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en]; - if (!acquire(aux_engine, ddc_pin)) + if (!acquire(aux_engine, ddc_pin)) { + *operation_result = AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE; return -1; + } if (payload->i2c_over_aux) aux_req.type = AUX_TRANSACTION_TYPE_I2C; @@ -475,7 +546,7 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, aux_req.action = i2caux_action_from_payload(payload); aux_req.address = payload->address; - aux_req.delay = payload->defer_delay * 10; + aux_req.delay = 0; aux_req.length = payload->length; aux_req.data = payload->data; @@ -544,8 +615,15 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, case AUX_TRANSACTION_REPLY_AUX_DEFER: case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK: case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER: - if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) + if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) { goto fail; + } else { + if ((*payload->reply == AUX_TRANSACTION_REPLY_AUX_DEFER) || + (*payload->reply == AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER)) { + if (payload->defer_delay > 0) + msleep(payload->defer_delay); + } + } break; case AUX_TRANSACTION_REPLY_I2C_DEFER: @@ -582,6 +660,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc, break; case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON: + case AUX_CHANNEL_OPERATION_FAILED_ENGINE_ACQUIRE: case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN: default: goto fail; diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h index ed7fec8fe253..b4b2c79a8073 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h @@ -29,6 +29,7 @@ #include "i2caux_interface.h" #include "inc/hw/aux_engine.h" + #ifdef CONFIG_DRM_AMD_DC_DCN2_0 #define AUX_COMMON_REG_LIST0(id)\ SRI(AUX_CONTROL, DP_AUX, id), \ @@ -36,6 +37,7 @@ SRI(AUX_SW_DATA, DP_AUX, id), \ SRI(AUX_SW_CONTROL, DP_AUX, id), \ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \ + SRI(AUX_DPHY_RX_CONTROL1, DP_AUX, id), \ SRI(AUX_SW_STATUS, DP_AUX, id) #endif @@ -55,6 +57,7 @@ struct dce110_aux_registers { uint32_t AUX_SW_DATA; uint32_t AUX_SW_CONTROL; uint32_t AUX_INTERRUPT_CONTROL; + uint32_t AUX_DPHY_RX_CONTROL1; uint32_t AUX_SW_STATUS; 
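/*
 * Editor's illustration (not part of the patch): dce_aux_configure_timeout()
 * above encodes a microsecond timeout as a (multiplier, length) pair, where
 * the multiplier selects 8/16/32/64 us units and length is the rounded-up
 * count of those units, clamped to 127. Example: 3200 us exceeds
 * 2 * 1016 us but not 4 * 1016 us, so the unit is 32 us and
 * length = ceil(3200 / 32) = 100.
 */
#include <stdint.h>

#define SKETCH_INCREMENT	1016
#define SKETCH_MAX_LENGTH	127

static void sketch_encode_aux_timeout(uint32_t timeout_us,
				      uint32_t *multiplier, uint32_t *length)
{
	static const uint32_t unit_us[] = { 8, 16, 32, 64 };
	uint32_t m;

	/* pick the smallest unit whose 127-entry range still covers the value */
	if (timeout_us <= SKETCH_INCREMENT)
		m = 0;
	else if (timeout_us <= 2 * SKETCH_INCREMENT)
		m = 1;
	else if (timeout_us <= 4 * SKETCH_INCREMENT)
		m = 2;
	else
		m = 3;

	*multiplier = m;
	*length = (timeout_us + unit_us[m] - 1) / unit_us[m];	/* round up */
	if (*length > SKETCH_MAX_LENGTH)
		*length = SKETCH_MAX_LENGTH;
}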
uint32_t AUXN_IMPCAL; uint32_t AUXP_IMPCAL; @@ -62,6 +65,156 @@ struct dce110_aux_registers { uint32_t AUX_RESET_MASK; }; +#define DCE_AUX_REG_FIELD_LIST(type)\ + type AUX_EN;\ + type AUX_RESET;\ + type AUX_RESET_DONE;\ + type AUX_REG_RW_CNTL_STATUS;\ + type AUX_SW_USE_AUX_REG_REQ;\ + type AUX_SW_DONE_USING_AUX_REG;\ + type AUX_SW_AUTOINCREMENT_DISABLE;\ + type AUX_SW_DATA_RW;\ + type AUX_SW_INDEX;\ + type AUX_SW_GO;\ + type AUX_SW_DATA;\ + type AUX_SW_REPLY_BYTE_COUNT;\ + type AUX_SW_DONE;\ + type AUX_SW_DONE_ACK;\ + type AUXN_IMPCAL_ENABLE;\ + type AUXP_IMPCAL_ENABLE;\ + type AUXN_IMPCAL_OVERRIDE_ENABLE;\ + type AUXP_IMPCAL_OVERRIDE_ENABLE;\ + type AUX_RX_TIMEOUT_LEN;\ + type AUX_RX_TIMEOUT_LEN_MUL;\ + type AUXN_CALOUT_ERROR_AK;\ + type AUXP_CALOUT_ERROR_AK;\ + type AUX_SW_START_DELAY;\ + type AUX_SW_WR_BYTES + +#define DCE10_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +#define DCE_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +#define DCE12_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, 
AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +/* DCN10 MASK */ +#define DCN10_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_ENABLE, mask_sh),\ + AUX_SF(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE, mask_sh),\ + AUX_SF(AUXN_IMPCAL, AUXN_IMPCAL_OVERRIDE_ENABLE, mask_sh) + +/* for all other DCN */ +#define DCN_AUX_MASK_SH_LIST(mask_sh)\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_EN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET, mask_sh),\ + AUX_SF(DP_AUX0_AUX_CONTROL, AUX_RESET_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_REG_RW_CNTL_STATUS, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, mask_sh),\ + AUX_SF(DP_AUX0_AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_START_DELAY, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_WR_BYTES, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_CONTROL, AUX_SW_GO, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA_RW, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_AUTOINCREMENT_DISABLE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_INDEX, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_DATA, AUX_SW_DATA, 
mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT, mask_sh),\ + AUX_SF(DP_AUX0_AUX_SW_STATUS, AUX_SW_DONE, mask_sh),\ + AUX_SF(DP_AUX0_AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, mask_sh),\ + AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN, mask_sh),\ + AUX_SF(DP_AUX0_AUX_DPHY_RX_CONTROL1, AUX_RX_TIMEOUT_LEN_MUL, mask_sh) + +#define AUX_SF(reg_name, field_name, post_fix)\ + .field_name = reg_name ## __ ## field_name ## post_fix + enum { /* This is the timeout as defined in DP 1.2a, * 2.3.4 "Detailed uPacket TX AUX CH State Description". */ @@ -97,20 +250,34 @@ struct dce_aux { uint32_t max_defer_write_retry; bool acquire_reset; + struct dce_aux_funcs *funcs; +}; + +struct dce110_aux_registers_mask { + DCE_AUX_REG_FIELD_LIST(uint32_t); +}; + +struct dce110_aux_registers_shift { + DCE_AUX_REG_FIELD_LIST(uint8_t); }; + struct aux_engine_dce110 { struct dce_aux base; const struct dce110_aux_registers *regs; + const struct dce110_aux_registers_mask *mask; + const struct dce110_aux_registers_shift *shift; struct { uint32_t aux_control; uint32_t aux_arb_control; uint32_t aux_sw_data; uint32_t aux_sw_control; uint32_t aux_interrupt_control; + uint32_t aux_dphy_rx_control1; + uint32_t aux_dphy_rx_control0; uint32_t aux_sw_status; } addr; - uint32_t timeout_period; + uint32_t polling_timeout_period; }; struct aux_engine_dce110_init_data { @@ -120,12 +287,15 @@ struct aux_engine_dce110_init_data { const struct dce110_aux_registers *regs; }; -struct dce_aux *dce110_aux_engine_construct( - struct aux_engine_dce110 *aux_engine110, +struct dce_aux *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110, struct dc_context *ctx, uint32_t inst, uint32_t timeout_period, - const struct dce110_aux_registers *regs); + const struct dce110_aux_registers *regs, + + const struct dce110_aux_registers_mask *mask, + const struct dce110_aux_registers_shift *shift, + bool is_ext_aux_timeout_configurable); void dce110_engine_destroy(struct dce_aux **engine); @@ -139,4 +309,13 @@ int dce_aux_transfer_raw(struct ddc_service *ddc, bool dce_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *cmd); + +struct dce_aux_funcs { + bool (*configure_timeout) + (struct ddc_service *ddc, + uint32_t timeout); + void (*destroy) + (struct aux_engine **ptr); +}; + #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c index 0b86cee4876f..ba995d3f2318 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c @@ -907,9 +907,6 @@ void dce_dmcu_destroy(struct dmcu **dmcu) { struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(*dmcu); - if (dmcu_dce->base.dmcu_state == DMCU_RUNNING) - dmcu_dce->base.funcs->set_psr_enable(*dmcu, false, true); - kfree(dmcu_dce); *dmcu = NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h index ac04d77058f0..32d145a0d6fc 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_hwseq.h @@ -679,6 +679,7 @@ struct dce_hwseq_registers { HWS_SF(, DOMAIN17_PG_STATUS, DOMAIN17_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DOMAIN18_PG_STATUS, DOMAIN18_PGFSM_PWR_STATUS, mask_sh), \ HWS_SF(, DC_IP_REQUEST_CNTL, IP_REQUEST_EN, mask_sh), \ + HWSEQ_LVTMA_MASK_SH_LIST(mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_CNTL, LVTMA_BLON, mask_sh), \ HWS_SF(, LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, mask_sh) #endif diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c 
b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c index 31b698bf9cfc..8aa937f496c4 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c @@ -606,11 +606,11 @@ static void dce_mi_allocate_dmif( } if (dce_mi->wa.single_head_rdreq_dmif_limit) { - uint32_t eanble = (total_stream_num > 1) ? 0 : + uint32_t enable = (total_stream_num > 1) ? 0 : dce_mi->wa.single_head_rdreq_dmif_limit; REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT, - ENABLE, eanble); + ENABLE, enable); } } @@ -636,11 +636,11 @@ static void dce_mi_free_dmif( 10, 3500); if (dce_mi->wa.single_head_rdreq_dmif_limit) { - uint32_t eanble = (total_stream_num > 1) ? 0 : + uint32_t enable = (total_stream_num > 1) ? 0 : dce_mi->wa.single_head_rdreq_dmif_limit; REG_UPDATE(MC_HUB_RDREQ_DMIF_LIMIT, - ENABLE, eanble); + ENABLE, enable); } } diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c index 76d54885374a..a5e122c721ec 100644 --- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c @@ -399,6 +399,37 @@ static const struct dc_plane_cap plane_cap = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -506,6 +537,14 @@ static const struct dce_mem_input_mask mi_masks = { .ENABLE = MC_HUB_RDREQ_DMIF_LIMIT__ENABLE_MASK }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE10_AUX_MASK_SH_LIST(_MASK) +}; + static struct mem_input *dce100_mem_input_create( struct dc_context *ctx, uint32_t inst) @@ -571,14 +610,18 @@ struct link_encoder *dce100_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -611,7 +654,10 @@ struct dce_aux *dce100_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -997,6 +1043,8 @@ static bool construct( dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; dc->caps.disable_dp_clk_share = true; + dc->caps.extended_aux_timeout_support = false; + for (i = 0; i < pool->base.pipe_count; i++) { pool->base.timing_generators[i] = dce100_timing_generator_create( diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c 
b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 01a924bf477a..f0e837d14000 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -944,7 +944,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) { /* notify audio driver for audio modes of monitor */ struct dc *core_dc; - struct pp_smu_funcs *pp_smu = NULL; struct clk_mgr *clk_mgr; unsigned int i, num_audio = 1; @@ -957,9 +956,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == true) return; - if (core_dc->res_pool->pp_smu) - pp_smu = core_dc->res_pool->pp_smu; - if (pipe_ctx->stream_res.audio) { for (i = 0; i < MAX_PIPES; i++) { /*current_state not updated yet*/ @@ -984,7 +980,6 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) { struct dc *dc; - struct pp_smu_funcs *pp_smu = NULL; struct clk_mgr *clk_mgr; if (!pipe_ctx || !pipe_ctx->stream) @@ -1001,9 +996,6 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.audio) { pipe_ctx->stream_res.audio->enabled = false; - if (dc->res_pool->pp_smu) - pp_smu = dc->res_pool->pp_smu; - if (dc_is_dp_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( pipe_ctx->stream_res.stream_enc); @@ -1169,8 +1161,9 @@ static void build_audio_output( } } - if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || - pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + if (state->clk_mgr && + (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT || + pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)) { audio_output->pll_info.dp_dto_source_clock_in_khz = state->clk_mgr->funcs->get_dp_ref_clk_frequency( state->clk_mgr); @@ -1418,7 +1411,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; - pipe_ctx->stream->link->psr_enabled = false; + pipe_ctx->stream->link->psr_feature_enabled = false; return DC_OK; } @@ -1428,8 +1421,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( static void power_down_encoders(struct dc *dc) { int i; - enum connector_id connector_id; - enum signal_type signal = SIGNAL_TYPE_NONE; /* do not know BIOS back-front mapping, simply blank all. 
It will not * hurt for non-DP @@ -1440,15 +1431,12 @@ static void power_down_encoders(struct dc *dc) } for (i = 0; i < dc->link_count; i++) { - connector_id = dal_graphics_object_id_get_connector_id(dc->links[i]->link_id); - if ((connector_id == CONNECTOR_ID_DISPLAY_PORT) || - (connector_id == CONNECTOR_ID_EDP)) { + enum signal_type signal = dc->links[i]->connector_signal; + if ((signal == SIGNAL_TYPE_EDP) || + (signal == SIGNAL_TYPE_DISPLAY_PORT)) if (!dc->links[i]->wa_flags.dp_keep_receiver_powered) dp_receiver_power_ctrl(dc->links[i], false); - if (connector_id == CONNECTOR_ID_EDP) - signal = SIGNAL_TYPE_EDP; - } dc->links[i]->link_enc->funcs->disable_output( dc->links[i]->link_enc, signal); @@ -1529,18 +1517,6 @@ static struct dc_stream_state *get_edp_stream(struct dc_state *context) return NULL; } -static struct dc_link *get_edp_link(struct dc *dc) -{ - int i; - - // report any eDP links, even unconnected DDI's - for (i = 0; i < dc->link_count; i++) { - if (dc->links[i]->connector_signal == SIGNAL_TYPE_EDP) - return dc->links[i]; - } - return NULL; -} - static struct dc_link *get_edp_link_with_sink( struct dc *dc, struct dc_state *context) @@ -1834,7 +1810,7 @@ static bool should_enable_fbc(struct dc *dc, return false; /* PSR should not be enabled */ - if (pipe_ctx->stream->link->psr_enabled) + if (pipe_ctx->stream->link->psr_feature_enabled) return false; /* Nothing to compress */ @@ -2464,7 +2440,6 @@ static void dce110_program_front_end_for_pipe( struct dc *dc, struct pipe_ctx *pipe_ctx) { struct mem_input *mi = pipe_ctx->plane_res.mi; - struct pipe_ctx *old_pipe = NULL; struct dc_plane_state *plane_state = pipe_ctx->plane_state; struct xfm_grph_csc_adjustment adjust; struct out_csc_color_matrix tbl_entry; @@ -2472,9 +2447,6 @@ static void dce110_program_front_end_for_pipe( DC_LOGGER_INIT(); memset(&tbl_entry, 0, sizeof(tbl_entry)); - if (dc->current_state) - old_pipe = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; - memset(&adjust, 0, sizeof(adjust)); adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c index 89620adc81d8..83a4dbf6d76e 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c @@ -275,6 +275,14 @@ static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE110(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE_AUX_MASK_SH_LIST(_MASK) +}; + #define opp_regs(id)\ [id] = {\ OPP_DCE_110_REG_LIST(id),\ @@ -440,6 +448,37 @@ static const struct dc_plane_cap underlay_plane_cap = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -617,14 +656,18 @@ static struct link_encoder *dce110_link_encoder_create( { struct 
dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -657,7 +700,10 @@ struct dce_aux *dce110_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -1293,6 +1339,7 @@ static bool construct( dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.is_apu = true; + dc->caps.extended_aux_timeout_support = false; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c index 21a657e79306..97dcc5d0862b 100644 --- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c @@ -172,6 +172,14 @@ static const struct dce_abm_mask abm_mask = { ABM_MASK_SH_LIST_DCE110(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE_AUX_MASK_SH_LIST(_MASK) +}; + #define ipp_regs(id)\ [id] = {\ IPP_DCE110_REG_LIST_DCE_BASE(id)\ @@ -417,6 +425,37 @@ static const struct dc_plane_cap plane_cap = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -575,14 +614,18 @@ struct link_encoder *dce112_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -630,7 +673,10 @@ struct dce_aux *dce112_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -1163,7 +1209,7 @@ static bool construct( dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; - + dc->caps.extended_aux_timeout_support = false; 
/************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c index 7c52f7f9196c..63543f6918ff 100644 --- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c @@ -293,6 +293,14 @@ static const struct dce_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCE120(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE12_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE12_AUX_MASK_SH_LIST(_MASK) +}; + #define opp_regs(id)\ [id] = {\ OPP_DCE_120_REG_LIST(id),\ @@ -356,6 +364,37 @@ static const struct dce_audio_mask audio_mask = { DCE120_AUD_COMMON_MASK_SH_LIST(_MASK) }; +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + #define clk_src_regs(index, id)\ [index] = {\ CS_COMMON_REG_LIST_DCE_112(id),\ @@ -404,7 +443,10 @@ struct dce_aux *dce120_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -655,14 +697,18 @@ static struct link_encoder *dce120_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); @@ -1006,7 +1052,7 @@ static bool construct( dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; dc->caps.psp_setup_panel_mode = true; - + dc->caps.extended_aux_timeout_support = false; dc->debug = debug_defaults; /************************************************* diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c index 643ccb0ade00..3e8d4b49f279 100644 --- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c @@ -288,6 +288,14 @@ static const struct dce_opp_mask opp_mask = { OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCE10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCE10_AUX_MASK_SH_LIST(_MASK) +}; + #define aux_engine_regs(id)\ [id] = {\ AUX_COMMON_REG_LIST(id), \ @@ -431,6 +439,37 @@ static const struct dce_abm_mask abm_mask = { #define CC_DC_HDMI_STRAPS__AUDIO_STREAM_NUMBER__SHIFT 0x8 #endif +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case 
TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + case TRANSMITTER_UNIPHY_G: + return 6; + break; + default: + ASSERT(0); + return 0; + } +} + static void read_dce_straps( struct dc_context *ctx, struct resource_straps *straps) @@ -491,7 +530,10 @@ struct dce_aux *dce80_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -669,14 +711,18 @@ struct link_encoder *dce80_link_encoder_create( { struct dce110_link_encoder *enc110 = kzalloc(sizeof(struct dce110_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc110) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dce110_link_encoder_construct(enc110, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source]); return &enc110->base; @@ -895,6 +941,7 @@ static bool dce80_construct( dc->caps.i2c_speed_in_khz = 40; dc->caps.max_cursor_size = 128; dc->caps.dual_link_dvi = true; + dc->caps.extended_aux_timeout_support = false; /************************************************* * Create resources * diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c index d8b2da18db39..997e9582edc7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.c @@ -129,7 +129,7 @@ void dpp_set_gamut_remap_bypass(struct dcn10_dpp *dpp) #define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19)) -static bool dpp_get_optimal_number_of_taps( +bool dpp1_get_optimal_number_of_taps( struct dpp *dpp, struct scaler_data *scl_data, const struct scaling_taps *in_taps) @@ -521,7 +521,7 @@ static const struct dpp_funcs dcn10_dpp_funcs = { .dpp_read_state = dpp_read_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp_get_optimal_number_of_taps, + .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, .dpp_set_csc_adjustment = dpp1_cm_set_output_csc_adjustment, .dpp_set_csc_default = dpp1_cm_set_output_csc_default, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h index e2c613611ac9..1d4a7d640334 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_dpp.h @@ -1504,6 +1504,11 @@ void dpp1_set_hdr_multiplier( struct dpp *dpp_base, uint32_t multiplier); +bool dpp1_get_optimal_number_of_taps( + struct dpp *dpp, + struct scaler_data *scl_data, + const struct scaling_taps *in_taps); + void dpp1_construct(struct dcn10_dpp *dpp1, struct dc_context *ctx, uint32_t inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c index 001db49e4bb2..14d1be6c66e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c @@ -841,6 +841,14 @@ void min_set_viewport( REG_SET_2(DCSURF_PRI_VIEWPORT_START_C, 
0, PRI_VIEWPORT_X_START_C, viewport_c->x, PRI_VIEWPORT_Y_START_C, viewport_c->y); + + REG_SET_2(DCSURF_SEC_VIEWPORT_DIMENSION_C, 0, + SEC_VIEWPORT_WIDTH_C, viewport_c->width, + SEC_VIEWPORT_HEIGHT_C, viewport_c->height); + + REG_SET_2(DCSURF_SEC_VIEWPORT_START_C, 0, + SEC_VIEWPORT_X_START_C, viewport_c->x, + SEC_VIEWPORT_Y_START_C, viewport_c->y); } void hubp1_read_state_common(struct hubp *hubp) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h index cb20d10288c0..ae70d9c0aa1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.h @@ -47,6 +47,8 @@ SRI(DCSURF_SEC_VIEWPORT_START, HUBP, id), \ SRI(DCSURF_PRI_VIEWPORT_DIMENSION_C, HUBP, id), \ SRI(DCSURF_PRI_VIEWPORT_START_C, HUBP, id), \ + SRI(DCSURF_SEC_VIEWPORT_DIMENSION_C, HUBP, id), \ + SRI(DCSURF_SEC_VIEWPORT_START_C, HUBP, id), \ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS, HUBPREQ, id),\ SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, HUBPREQ, id),\ @@ -57,8 +59,12 @@ SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_SURFACE_ADDRESS_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_SURFACE_ADDRESS_C, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ SRI(DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, HUBPREQ, id),\ + SRI(DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, HUBPREQ, id),\ SRI(DCSURF_SURFACE_INUSE, HUBPREQ, id),\ SRI(DCSURF_SURFACE_INUSE_HIGH, HUBPREQ, id),\ SRI(DCSURF_SURFACE_INUSE_C, HUBPREQ, id),\ @@ -150,6 +156,8 @@ uint32_t DCSURF_SEC_VIEWPORT_START; \ uint32_t DCSURF_PRI_VIEWPORT_DIMENSION_C; \ uint32_t DCSURF_PRI_VIEWPORT_START_C; \ + uint32_t DCSURF_SEC_VIEWPORT_DIMENSION_C; \ + uint32_t DCSURF_SEC_VIEWPORT_START_C; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS; \ uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH; \ @@ -160,8 +168,12 @@ uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C; \ uint32_t DCSURF_PRIMARY_SURFACE_ADDRESS_C; \ + uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C; \ + uint32_t DCSURF_SECONDARY_SURFACE_ADDRESS_C; \ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C; \ uint32_t DCSURF_PRIMARY_META_SURFACE_ADDRESS_C; \ + uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C; \ + uint32_t DCSURF_SECONDARY_META_SURFACE_ADDRESS_C; \ uint32_t DCSURF_SURFACE_INUSE; \ uint32_t DCSURF_SURFACE_INUSE_HIGH; \ uint32_t DCSURF_SURFACE_INUSE_C; \ @@ -279,6 +291,10 @@ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_DIMENSION_C, PRI_VIEWPORT_HEIGHT_C, mask_sh),\ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_X_START_C, mask_sh),\ HUBP_SF(HUBP0_DCSURF_PRI_VIEWPORT_START_C, PRI_VIEWPORT_Y_START_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_WIDTH_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_DIMENSION_C, SEC_VIEWPORT_HEIGHT_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_X_START_C, mask_sh),\ + HUBP_SF(HUBP0_DCSURF_SEC_VIEWPORT_START_C, SEC_VIEWPORT_Y_START_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH, PRIMARY_SURFACE_ADDRESS_HIGH, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS, PRIMARY_SURFACE_ADDRESS, mask_sh),\ 
HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH, SECONDARY_SURFACE_ADDRESS_HIGH, mask_sh),\ @@ -289,8 +305,12 @@ HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS, SECONDARY_META_SURFACE_ADDRESS, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_HIGH_C, PRIMARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_SURFACE_ADDRESS_C, PRIMARY_SURFACE_ADDRESS_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_HIGH_C, SECONDARY_SURFACE_ADDRESS_HIGH_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_SURFACE_ADDRESS_C, SECONDARY_SURFACE_ADDRESS_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_HIGH_C, PRIMARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_PRIMARY_META_SURFACE_ADDRESS_C, PRIMARY_META_SURFACE_ADDRESS_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_HIGH_C, SECONDARY_META_SURFACE_ADDRESS_HIGH_C, mask_sh),\ + HUBP_SF(HUBPREQ0_DCSURF_SECONDARY_META_SURFACE_ADDRESS_C, SECONDARY_META_SURFACE_ADDRESS_C, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE, SURFACE_INUSE_ADDRESS, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_HIGH, SURFACE_INUSE_ADDRESS_HIGH, mask_sh),\ HUBP_SF(HUBPREQ0_DCSURF_SURFACE_INUSE_C, SURFACE_INUSE_ADDRESS_C, mask_sh),\ @@ -469,6 +489,10 @@ type PRI_VIEWPORT_HEIGHT_C; \ type PRI_VIEWPORT_X_START_C; \ type PRI_VIEWPORT_Y_START_C; \ + type SEC_VIEWPORT_WIDTH_C; \ + type SEC_VIEWPORT_HEIGHT_C; \ + type SEC_VIEWPORT_X_START_C; \ + type SEC_VIEWPORT_Y_START_C; \ type PRIMARY_SURFACE_ADDRESS_HIGH;\ type PRIMARY_SURFACE_ADDRESS;\ type SECONDARY_SURFACE_ADDRESS_HIGH;\ @@ -479,8 +503,12 @@ type SECONDARY_META_SURFACE_ADDRESS;\ type PRIMARY_SURFACE_ADDRESS_HIGH_C;\ type PRIMARY_SURFACE_ADDRESS_C;\ + type SECONDARY_SURFACE_ADDRESS_HIGH_C;\ + type SECONDARY_SURFACE_ADDRESS_C;\ type PRIMARY_META_SURFACE_ADDRESS_HIGH_C;\ type PRIMARY_META_SURFACE_ADDRESS_C;\ + type SECONDARY_META_SURFACE_ADDRESS_HIGH_C;\ + type SECONDARY_META_SURFACE_ADDRESS_C;\ type SURFACE_INUSE_ADDRESS;\ type SURFACE_INUSE_ADDRESS_HIGH;\ type SURFACE_INUSE_ADDRESS_C;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 60123db7ba02..eb91432621ab 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -670,6 +670,10 @@ static void dcn10_bios_golden_init(struct dc *dc) int i; bool allow_self_fresh_force_enable = true; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + if (dc->hwss.s0i3_golden_init_wa && dc->hwss.s0i3_golden_init_wa(dc)) + return; +#endif if (dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled) allow_self_fresh_force_enable = dc->res_pool->hubbub->funcs->is_allow_self_refresh_enabled(dc->res_pool->hubbub); @@ -1300,6 +1304,10 @@ static void dcn10_init_hw(struct dc *dc) } dc->hwss.enable_power_gating_plane(dc->hwseq, true); + + if (dc->clk_mgr->funcs->notify_wm_ranges) + dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr); + } static void dcn10_reset_hw_ctx_wrap( @@ -1452,15 +1460,15 @@ static void log_tf(struct dc_context *ctx, DC_LOG_ALL_TF_CHANNELS("Logging all channels..."); for (i = 0; i < hw_points_num; i++) { - DC_LOG_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value); - DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value); - DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value); + DC_LOG_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value); + DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, 
tf->tf_pts.green[i].value); + DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value); } for (i = hw_points_num; i < MAX_NUM_HW_POINTS; i++) { - DC_LOG_ALL_GAMMA("R\t%d\t%llu\n", i, tf->tf_pts.red[i].value); - DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu\n", i, tf->tf_pts.green[i].value); - DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu\n", i, tf->tf_pts.blue[i].value); + DC_LOG_ALL_GAMMA("R\t%d\t%llu", i, tf->tf_pts.red[i].value); + DC_LOG_ALL_TF_CHANNELS("G\t%d\t%llu", i, tf->tf_pts.green[i].value); + DC_LOG_ALL_TF_CHANNELS("B\t%d\t%llu", i, tf->tf_pts.blue[i].value); } } @@ -2304,8 +2312,7 @@ void update_dchubp_dpp( dc->res_pool->dccg->funcs->update_dpp_dto( dc->res_pool->dccg, dpp->inst, - pipe_ctx->plane_res.bw.dppclk_khz, - false); + pipe_ctx->plane_res.bw.dppclk_khz); else dc->clk_mgr->clks.dppclk_khz = should_divided_by_2 ? dc->clk_mgr->clks.dispclk_khz / 2 : @@ -2512,8 +2519,10 @@ static void program_all_pipe_in_tree( pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); + if (dc->hwss.setup_vupdate_interrupt) + dc->hwss.setup_vupdate_interrupt(pipe_ctx); + dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); } if (pipe_ctx->plane_state != NULL) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h index 8bf5f0f2301d..88fcc395adf5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h @@ -113,6 +113,20 @@ struct dcn10_link_enc_registers { uint32_t DIG_LANE_ENABLE; /* UNIPHY */ uint32_t CHANNEL_XBAR_CNTL; + /* DPCS */ + uint32_t RDPCSTX_PHY_CNTL3; + uint32_t RDPCSTX_PHY_CNTL4; + uint32_t RDPCSTX_PHY_CNTL5; + uint32_t RDPCSTX_PHY_CNTL6; + uint32_t RDPCSTX_PHY_CNTL7; + uint32_t RDPCSTX_PHY_CNTL8; + uint32_t RDPCSTX_PHY_CNTL9; + uint32_t RDPCSTX_PHY_CNTL10; + uint32_t RDPCSTX_PHY_CNTL11; + uint32_t RDPCSTX_PHY_CNTL12; + uint32_t RDPCSTX_PHY_CNTL13; + uint32_t RDPCSTX_PHY_CNTL14; + uint32_t RDPCSTX_PHY_CNTL15; /* indirect registers */ uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_2; uint32_t RAWLANE0_DIG_PCS_XF_RX_OVRD_IN_3; @@ -250,6 +264,10 @@ struct dcn10_link_enc_registers { type RDPCS_EXT_REFCLK_EN;\ type RDPCS_TX_FIFO_EN;\ type UNIPHY_LINK_ENABLE;\ + type UNIPHY_CHANNEL0_XBAR_SOURCE;\ + type UNIPHY_CHANNEL1_XBAR_SOURCE;\ + type UNIPHY_CHANNEL2_XBAR_SOURCE;\ + type UNIPHY_CHANNEL3_XBAR_SOURCE;\ type UNIPHY_CHANNEL0_INVERT;\ type UNIPHY_CHANNEL1_INVERT;\ type UNIPHY_CHANNEL2_INVERT;\ @@ -337,16 +355,46 @@ struct dcn10_link_enc_registers { type RDPCS_TX_FIFO_ERROR_MASK;\ type RDPCS_DPALT_DISABLE_TOGGLE_MASK;\ type RDPCS_DPALT_4LANE_TOGGLE_MASK;\ + type RDPCS_PHY_DPALT_DP4;\ type RDPCS_PHY_DPALT_DISABLE;\ type RDPCS_PHY_DPALT_DISABLE_ACK;\ type RDPCS_PHY_DP_MPLLB_V2I;\ type RDPCS_PHY_DP_MPLLB_FREQ_VCO;\ + type RDPCS_PHY_DP_MPLLB_CP_INT_GS;\ + type RDPCS_PHY_RX_VREF_CTRL;\ type RDPCS_PHY_DP_MPLLB_CP_INT;\ type RDPCS_PHY_DP_MPLLB_CP_PROP;\ type RDPCS_PHY_RX_REF_LD_VAL;\ type RDPCS_PHY_RX_VCO_LD_VAL;\ type DPCSTX_DEBUG_CONFIG; \ - type RDPCSTX_DEBUG_CONFIG + type RDPCSTX_DEBUG_CONFIG; \ + type RDPCS_PHY_DP_TX0_EQ_MAIN;\ + type RDPCS_PHY_DP_TX0_EQ_PRE;\ + type RDPCS_PHY_DP_TX0_EQ_POST;\ + type RDPCS_PHY_DP_TX1_EQ_MAIN;\ + type RDPCS_PHY_DP_TX1_EQ_PRE;\ + type RDPCS_PHY_DP_TX1_EQ_POST;\ + type RDPCS_PHY_DP_TX2_EQ_MAIN;\ + type RDPCS_PHY_DP_MPLLB_CP_PROP_GS;\ + type RDPCS_PHY_DP_TX2_EQ_PRE;\ + type RDPCS_PHY_DP_TX2_EQ_POST;\ + type RDPCS_PHY_DP_TX3_EQ_MAIN;\ + 
type RDPCS_PHY_DCO_RANGE;\ + type RDPCS_PHY_DCO_FINETUNE;\ + type RDPCS_PHY_DP_TX3_EQ_PRE;\ + type RDPCS_PHY_DP_TX3_EQ_POST;\ + type RDPCS_PHY_SUP_PRE_HP;\ + type RDPCS_PHY_DP_TX0_VREGDRV_BYP;\ + type RDPCS_PHY_DP_TX1_VREGDRV_BYP;\ + type RDPCS_PHY_DP_TX2_VREGDRV_BYP;\ + type RDPCS_PHY_DP_TX3_VREGDRV_BYP;\ + type RDPCS_DMCU_DPALT_DIS_BLOCK_REG;\ + type UNIPHYA_SOFT_RESET;\ + type UNIPHYB_SOFT_RESET;\ + type UNIPHYC_SOFT_RESET;\ + type UNIPHYD_SOFT_RESET;\ + type UNIPHYE_SOFT_RESET;\ + type UNIPHYF_SOFT_RESET #define DCN20_LINK_ENCODER_REG_FIELD_LIST(type) \ type DIG_LANE0EN;\ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c index e9ebbbe256b4..0a9ad692f541 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c @@ -168,7 +168,10 @@ static void opp1_set_pixel_encoding( REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 0); break; case PIXEL_ENCODING_YCBCR422: - REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 1); + REG_UPDATE_3(FMT_CONTROL, + FMT_PIXEL_ENCODING, 1, + FMT_SUBSAMPLING_MODE, 2, + FMT_CBCR_BIT_REDUCTION_BYPASS, 0); break; case PIXEL_ENCODING_YCBCR420: REG_UPDATE(FMT_CONTROL, FMT_PIXEL_ENCODING, 2); @@ -237,6 +240,9 @@ void opp1_set_dyn_expansion( FMT_DYNAMIC_EXP_EN, 0, FMT_DYNAMIC_EXP_MODE, 0); + if (opp->dyn_expansion == DYN_EXPANSION_DISABLE) + return; + /*00 - 10-bit -> 12-bit dynamic expansion*/ /*01 - 8-bit -> 12-bit dynamic expansion*/ if (signal == SIGNAL_TYPE_HDMI_TYPE_A || diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h index 0f10adea000c..2c0ecfa5a643 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.h @@ -116,6 +116,8 @@ type FMT_RAND_G_SEED; \ type FMT_RAND_B_SEED; \ type FMT_PIXEL_ENCODING; \ + type FMT_SUBSAMPLING_MODE; \ + type FMT_CBCR_BIT_REDUCTION_BYPASS; \ type FMT_CLAMP_DATA_EN; \ type FMT_CLAMP_COLOR_FORMAT; \ type FMT_DYNAMIC_EXP_EN; \ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c index e74a07d03fde..dabccbd49ad4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c @@ -1230,59 +1230,25 @@ bool optc1_is_stereo_left_eye(struct timing_generator *optc) return ret; } -bool optc1_is_matching_timing(struct timing_generator *tg, - const struct dc_crtc_timing *otg_timing) +bool optc1_get_hw_timing(struct timing_generator *tg, + struct dc_crtc_timing *hw_crtc_timing) { - struct dc_crtc_timing hw_crtc_timing = {0}; struct dcn_otg_state s = {0}; - if (tg == NULL || otg_timing == NULL) + if (tg == NULL || hw_crtc_timing == NULL) return false; optc1_read_otg_state(DCN10TG_FROM_TG(tg), &s); - hw_crtc_timing.h_total = s.h_total + 1; - hw_crtc_timing.h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end); - hw_crtc_timing.h_front_porch = s.h_total + 1 - s.h_blank_start; - hw_crtc_timing.h_sync_width = s.h_sync_a_end - s.h_sync_a_start; + hw_crtc_timing->h_total = s.h_total + 1; + hw_crtc_timing->h_addressable = s.h_total - ((s.h_total - s.h_blank_start) + s.h_blank_end); + hw_crtc_timing->h_front_porch = s.h_total + 1 - s.h_blank_start; + hw_crtc_timing->h_sync_width = s.h_sync_a_end - s.h_sync_a_start; - hw_crtc_timing.v_total = s.v_total + 1; - hw_crtc_timing.v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end); - hw_crtc_timing.v_front_porch = s.v_total + 
1 - s.v_blank_start; - hw_crtc_timing.v_sync_width = s.v_sync_a_end - s.v_sync_a_start; - - if (otg_timing->h_total != hw_crtc_timing.h_total) - return false; - - if (otg_timing->h_border_left != hw_crtc_timing.h_border_left) - return false; - - if (otg_timing->h_addressable != hw_crtc_timing.h_addressable) - return false; - - if (otg_timing->h_border_right != hw_crtc_timing.h_border_right) - return false; - - if (otg_timing->h_front_porch != hw_crtc_timing.h_front_porch) - return false; - - if (otg_timing->h_sync_width != hw_crtc_timing.h_sync_width) - return false; - - if (otg_timing->v_total != hw_crtc_timing.v_total) - return false; - - if (otg_timing->v_border_top != hw_crtc_timing.v_border_top) - return false; - - if (otg_timing->v_addressable != hw_crtc_timing.v_addressable) - return false; - - if (otg_timing->v_border_bottom != hw_crtc_timing.v_border_bottom) - return false; - - if (otg_timing->v_sync_width != hw_crtc_timing.v_sync_width) - return false; + hw_crtc_timing->v_total = s.v_total + 1; + hw_crtc_timing->v_addressable = s.v_total - ((s.v_total - s.v_blank_start) + s.v_blank_end); + hw_crtc_timing->v_front_porch = s.v_total + 1 - s.v_blank_start; + hw_crtc_timing->v_sync_width = s.v_sync_a_end - s.v_sync_a_start; return true; } @@ -1486,7 +1452,6 @@ static const struct timing_generator_funcs dcn10_tg_funcs = { .get_frame_count = optc1_get_vblank_counter, .get_scanoutpos = optc1_get_crtc_scanoutpos, .get_otg_active_size = optc1_get_otg_active_size, - .is_matching_timing = optc1_is_matching_timing, .set_early_control = optc1_set_early_control, /* used by enable_timing_synchronization. Not need for FPGA */ .wait_for_state = optc1_wait_for_state, @@ -1514,7 +1479,8 @@ static const struct timing_generator_funcs dcn10_tg_funcs = { .configure_crc = optc1_configure_crc, .set_vtg_params = optc1_set_vtg_params, .program_manual_trigger = optc1_program_manual_trigger, - .setup_manual_trigger = optc1_setup_manual_trigger + .setup_manual_trigger = optc1_setup_manual_trigger, + .get_hw_timing = optc1_get_hw_timing, }; void dcn10_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h index 83575599672e..c8d795b335ba 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.h @@ -547,9 +547,8 @@ struct dcn_otg_state { void optc1_read_otg_state(struct optc *optc1, struct dcn_otg_state *s); -bool optc1_is_matching_timing( - struct timing_generator *tg, - const struct dc_crtc_timing *otg_timing); +bool optc1_get_hw_timing(struct timing_generator *tg, + struct dc_crtc_timing *hw_crtc_timing); bool optc1_validate_timing( struct timing_generator *optc, diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c index 1599bb971111..15640aedd664 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c @@ -319,6 +319,14 @@ static const struct dcn10_link_enc_mask le_mask = { LINK_ENCODER_MASK_SH_LIST_DCN10(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCN10_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN10_AUX_MASK_SH_LIST(_MASK) +}; + #define ipp_regs(id)\ [id] = {\ IPP_REG_LIST_DCN10(id),\ @@ -471,6 +479,28 @@ static const struct dcn_hubbub_mask hubbub_mask = { HUBBUB_MASK_SH_LIST_DCN10(_MASK) }; +static int 
map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + default: + ASSERT(0); + return 0; + } +} + #define clk_src_regs(index, pllid)\ [index] = {\ CS_COMMON_REG_LIST_DCN1_0(index, pllid),\ @@ -642,7 +672,10 @@ struct dce_aux *dcn10_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -751,14 +784,18 @@ struct link_encoder *dcn10_link_encoder_create( { struct dcn10_link_encoder *enc10 = kzalloc(sizeof(struct dcn10_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc10) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dcn10_link_encoder_construct(enc10, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, @@ -1308,6 +1345,8 @@ static bool construct( dc->caps.max_slave_planes = 1; dc->caps.is_apu = true; dc->caps.post_blend_color_processing = false; + dc->caps.extended_aux_timeout_support = false; + /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */ dc->caps.force_dp_tps4_for_cp2520 = true; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c index 9aa258f3550b..06e5bbb4545c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c @@ -1553,6 +1553,66 @@ unsigned int enc1_dig_source_otg( return tg_inst; } +bool enc1_stream_encoder_dp_get_pixel_format( + struct stream_encoder *enc, + enum dc_pixel_encoding *encoding, + enum dc_color_depth *depth) +{ + uint32_t hw_encoding = 0; + uint32_t hw_depth = 0; + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + + if (enc == NULL || + encoding == NULL || + depth == NULL) + return false; + + REG_GET_2(DP_PIXEL_FORMAT, + DP_PIXEL_ENCODING, &hw_encoding, + DP_COMPONENT_DEPTH, &hw_depth); + + switch (hw_depth) { + case DP_COMPONENT_PIXEL_DEPTH_6BPC: + *depth = COLOR_DEPTH_666; + break; + case DP_COMPONENT_PIXEL_DEPTH_8BPC: + *depth = COLOR_DEPTH_888; + break; + case DP_COMPONENT_PIXEL_DEPTH_10BPC: + *depth = COLOR_DEPTH_101010; + break; + case DP_COMPONENT_PIXEL_DEPTH_12BPC: + *depth = COLOR_DEPTH_121212; + break; + case DP_COMPONENT_PIXEL_DEPTH_16BPC: + *depth = COLOR_DEPTH_161616; + break; + default: + *depth = COLOR_DEPTH_UNDEFINED; + break; + } + + switch (hw_encoding) { + case DP_PIXEL_ENCODING_TYPE_RGB444: + *encoding = PIXEL_ENCODING_RGB; + break; + case DP_PIXEL_ENCODING_TYPE_YCBCR422: + *encoding = PIXEL_ENCODING_YCBCR422; + break; + case DP_PIXEL_ENCODING_TYPE_YCBCR444: + case DP_PIXEL_ENCODING_TYPE_Y_ONLY: + *encoding = PIXEL_ENCODING_YCBCR444; + break; + case DP_PIXEL_ENCODING_TYPE_YCBCR420: + *encoding = PIXEL_ENCODING_YCBCR420; + break; + default: + *encoding = PIXEL_ENCODING_UNDEFINED; + break; + } + return true; +} + static const struct stream_encoder_funcs dcn10_str_enc_funcs = { .dp_set_stream_attribute = enc1_stream_encoder_dp_set_stream_attribute, @@ -1589,6 +1649,8 
@@ static const struct stream_encoder_funcs dcn10_str_enc_funcs = { .dig_connect_to_otg = enc1_dig_connect_to_otg, .hdmi_reset_stream_attribute = enc1_reset_hdmi_stream_attribute, .dig_source_otg = enc1_dig_source_otg, + + .dp_get_pixel_format = enc1_stream_encoder_dp_get_pixel_format, }; void dcn10_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h index a512cbea00d1..c9cbc21d121e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.h @@ -621,4 +621,9 @@ void get_audio_clock_info( void enc1_reset_hdmi_stream_attribute( struct stream_encoder *enc); +bool enc1_stream_encoder_dp_get_pixel_format( + struct stream_encoder *enc, + enum dc_pixel_encoding *encoding, + enum dc_color_depth *depth); + #endif /* __DC_STREAM_ENCODER_DCN10_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c index 16476ed25536..1e1151356e60 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c @@ -44,16 +44,12 @@ #define DC_LOGGER \ dccg->ctx->logger -void dccg2_update_dpp_dto(struct dccg *dccg, - int dpp_inst, - int req_dppclk, - bool reduce_divider_only) +void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); if (dccg->ref_dppclk && req_dppclk) { int ref_dppclk = dccg->ref_dppclk; - int current_phase, current_modulo; ASSERT(req_dppclk <= ref_dppclk); /* need to clamp to 8 bits */ @@ -65,28 +61,9 @@ void dccg2_update_dpp_dto(struct dccg *dccg, if (req_dppclk > ref_dppclk) req_dppclk = ref_dppclk; } - - REG_GET_2(DPPCLK_DTO_PARAM[dpp_inst], - DPPCLK0_DTO_PHASE, ¤t_phase, - DPPCLK0_DTO_MODULO, ¤t_modulo); - - if (reduce_divider_only) { - // requested phase/modulo greater than current - if (req_dppclk * current_modulo >= current_phase * ref_dppclk) { - REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, req_dppclk, - DPPCLK0_DTO_MODULO, ref_dppclk); - } else { - REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, current_phase, - DPPCLK0_DTO_MODULO, current_modulo); - } - } else { - REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, - DPPCLK0_DTO_PHASE, req_dppclk, - DPPCLK0_DTO_MODULO, ref_dppclk); - } - + REG_SET_2(DPPCLK_DTO_PARAM[dpp_inst], 0, + DPPCLK0_DTO_PHASE, req_dppclk, + DPPCLK0_DTO_MODULO, ref_dppclk); REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_ENABLE[dpp_inst], 1); } else { @@ -119,32 +96,6 @@ void dccg2_get_dccg_ref_freq(struct dccg *dccg, void dccg2_init(struct dccg *dccg) { - struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); - - // Fallthrough intentional to program all available dpp_dto's - switch (dccg_dcn->base.ctx->dc->res_pool->pipe_count) { - case 6: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[5], 1); - /* Fall through */ - case 5: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[4], 1); - /* Fall through */ - case 4: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[3], 1); - /* Fall through */ - case 3: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[2], 1); - /* Fall through */ - case 2: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[1], 1); - /* Fall through */ - case 1: - REG_UPDATE(DPPCLK_DTO_CTRL, DPPCLK_DTO_DB_EN[0], 1); - break; - default: - ASSERT(false); - break; - } } static const struct dccg_funcs dccg2_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h 
b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index 74a074a873cd..2205cb0204e7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -97,7 +97,7 @@ struct dcn_dccg { const struct dccg_mask *dccg_mask; }; -void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk, bool raise_divider_only); +void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk); void dccg2_get_dccg_ref_freq(struct dccg *dccg, unsigned int xtalin_freq_inKhz, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index 2f5aade1e882..4d7e45892f08 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -376,13 +376,6 @@ bool dpp2_get_optimal_number_of_taps( struct scaler_data *scl_data, const struct scaling_taps *in_taps) { - uint32_t pixel_width; - - if (scl_data->viewport.width > scl_data->recout.width) - pixel_width = scl_data->recout.width; - else - pixel_width = scl_data->viewport.width; - /* Some ASICs does not support FP16 scaling, so we reject modes require this*/ if (scl_data->viewport.width != scl_data->h_active && scl_data->viewport.height != scl_data->v_active && @@ -464,7 +457,7 @@ static struct dpp_funcs dcn20_dpp_funcs = { .dpp_read_state = dpp20_read_state, .dpp_reset = dpp_reset, .dpp_set_scaler = dpp1_dscl_set_scaler_manual_scale, - .dpp_get_optimal_number_of_taps = dpp2_get_optimal_number_of_taps, + .dpp_get_optimal_number_of_taps = dpp1_get_optimal_number_of_taps, .dpp_set_gamut_remap = dpp1_cm_set_gamut_remap, .dpp_set_csc_adjustment = NULL, .dpp_set_csc_default = NULL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h index 290b2854bd2c..5b03b737b1d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.h @@ -30,16 +30,20 @@ #define TO_DCN20_DPP(dpp)\ container_of(dpp, struct dcn20_dpp, base) -#define TF_REG_LIST_DCN20(id) \ - TF_REG_LIST_DCN(id), \ +#define TF_REG_LIST_DCN20_COMMON_UPDATED(id) \ SRI(CM_BLNDGAM_LUT_WRITE_EN_MASK, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \ + SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id) + +#define TF_REG_LIST_DCN20_COMMON(id) \ SRI(CM_BLNDGAM_CONTROL, CM, id), \ SRI(CM_BLNDGAM_RAMB_START_CNTL_B, CM, id), \ SRI(CM_BLNDGAM_RAMB_START_CNTL_G, CM, id), \ SRI(CM_BLNDGAM_RAMB_START_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM, id), \ SRI(CM_BLNDGAM_RAMB_END_CNTL1_B, CM, id), \ SRI(CM_BLNDGAM_RAMB_END_CNTL2_B, CM, id), \ SRI(CM_BLNDGAM_RAMB_END_CNTL1_G, CM, id), \ @@ -66,9 +70,6 @@ SRI(CM_BLNDGAM_RAMA_START_CNTL_B, CM, id), \ SRI(CM_BLNDGAM_RAMA_START_CNTL_G, CM, id), \ SRI(CM_BLNDGAM_RAMA_START_CNTL_R, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM, id), \ - SRI(CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM, id), \ SRI(CM_BLNDGAM_RAMA_END_CNTL1_B, CM, id), \ SRI(CM_BLNDGAM_RAMA_END_CNTL2_B, CM, id), \ SRI(CM_BLNDGAM_RAMA_END_CNTL1_G, CM, id), \ @@ -147,7 +148,12 @@ SRI(CM_SHAPER_RAMA_REGION_28_29, CM, id), \ SRI(CM_SHAPER_RAMA_REGION_30_31, CM, id), \ SRI(CM_SHAPER_RAMA_REGION_32_33, CM, id), \ 
- SRI(CM_SHAPER_LUT_INDEX, CM, id), \ + SRI(CM_SHAPER_LUT_INDEX, CM, id) + +#define TF_REG_LIST_DCN20(id) \ + TF_REG_LIST_DCN(id), \ + TF_REG_LIST_DCN20_COMMON(id), \ + TF_REG_LIST_DCN20_COMMON_UPDATED(id), \ SRI(CURSOR_CONTROL, CURSOR0_, id), \ SRI(ALPHA_2BIT_LUT, CNVC_CFG, id), \ SRI(FCNV_FP_BIAS_R, CNVC_CFG, id), \ @@ -166,27 +172,41 @@ SRI(OBUF_MEM_PWR_CTRL, DSCL, id),\ SRI(DSCL_MEM_PWR_CTRL, DSCL, id) -#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\ - TF_REG_LIST_SH_MASK_DCN(mask_sh), \ + +#define TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh)\ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_CONTROL, CM_BLNDGAM_LUT_MODE, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \ + TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \ + TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh) + + +#define TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh)\ + TF_SF(CM0_CM_3DLUT_MODE, CM_3DLUT_MODE, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_START_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_B, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_G, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_SLOPE_CNTL_R, CM_BLNDGAM_RAMB_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_B, CM_BLNDGAM_RAMB_EXP_REGION_END_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_B, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_B, mask_sh), \ - 
TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_G, CM_BLNDGAM_RAMB_EXP_REGION_END_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_G, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL1_R, CM_BLNDGAM_RAMB_EXP_REGION_END_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMB_END_CNTL2_R, CM_BLNDGAM_RAMB_EXP_REGION_END_BASE_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_LUT_OFFSET, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMB_REGION_0_1, CM_BLNDGAM_RAMB_EXP_REGION1_LUT_OFFSET, mask_sh), \ @@ -261,18 +281,9 @@ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_START_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_START_SEGMENT_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_B, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_G, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_SLOPE_CNTL_R, CM_BLNDGAM_RAMA_EXP_REGION_LINEAR_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_B, CM_BLNDGAM_RAMA_EXP_REGION_END_B, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_B, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_B, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_G, CM_BLNDGAM_RAMA_EXP_REGION_END_G, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_G, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_G, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL1_R, CM_BLNDGAM_RAMA_EXP_REGION_END_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_SLOPE_R, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_RAMA_END_CNTL2_R, CM_BLNDGAM_RAMA_EXP_REGION_END_BASE_R, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_LUT_OFFSET, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION0_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_0_1, CM_BLNDGAM_RAMA_EXP_REGION1_LUT_OFFSET, mask_sh), \ @@ -341,9 +352,6 @@ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION32_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_LUT_OFFSET, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_RAMA_REGION_32_33, CM_BLNDGAM_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_EN_MASK, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_BLNDGAM_LUT_WRITE_EN_MASK, CM_BLNDGAM_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_LUT_INDEX, CM_BLNDGAM_LUT_INDEX, mask_sh), \ TF_SF(CM0_CM_BLNDGAM_LUT_DATA, CM_BLNDGAM_LUT_DATA, mask_sh), \ TF_SF(CM0_CM_MEM_PWR_CTRL, BLNDGAM_MEM_PWR_FORCE, mask_sh), \ @@ -356,7 +364,6 @@ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_WRITE_EN_MASK, mask_sh), \ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_RAM_SEL, mask_sh), \ TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_30BIT_EN, mask_sh), \ - TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_CONFIG_STATUS, mask_sh), \ 
TF_SF(CM0_CM_3DLUT_READ_WRITE_CONTROL, CM_3DLUT_READ_SEL, mask_sh), \ TF_SF(CM0_CM_SHAPER_CONTROL, CM_SHAPER_LUT_MODE, mask_sh), \ TF_SF(CM0_CM_SHAPER_RAMB_START_CNTL_B, CM_SHAPER_RAMB_EXP_REGION_START_B, mask_sh), \ @@ -521,9 +528,14 @@ TF_SF(CM0_CM_SHAPER_RAMA_REGION_32_33, CM_SHAPER_RAMA_EXP_REGION33_NUM_SEGMENTS, mask_sh), \ TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_EN_MASK, mask_sh), \ TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_LUT_WRITE_SEL, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_WRITE_EN_MASK, CM_SHAPER_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_SHAPER_LUT_INDEX, CM_SHAPER_LUT_INDEX, mask_sh), \ - TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh), \ + TF_SF(CM0_CM_SHAPER_LUT_DATA, CM_SHAPER_LUT_DATA, mask_sh) + + +#define TF_REG_LIST_SH_MASK_DCN20(mask_sh)\ + TF_REG_LIST_SH_MASK_DCN(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_COMMON(mask_sh), \ + TF_REG_LIST_SH_MASK_DCN20_UPDATED(mask_sh), \ TF_SF(CM0_CM_DGAM_LUT_WRITE_EN_MASK, CM_DGAM_CONFIG_STATUS, mask_sh), \ TF_SF(CM0_CM_CONTROL, CM_BYPASS, mask_sh), \ TF_SF(CURSOR0_0_CURSOR_CONTROL, CURSOR_MODE, mask_sh), \ @@ -560,6 +572,7 @@ TF_SF(DSCL0_OBUF_MEM_PWR_CTRL, OBUF_MEM_PWR_FORCE, mask_sh),\ TF_SF(DSCL0_DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, mask_sh) + #define TF_REG_FIELD_LIST_DCN2_0(type) \ TF_REG_FIELD_LIST(type) \ type CM_BLNDGAM_LUT_DATA; \ @@ -593,6 +606,7 @@ type OBUF_MEM_PWR_FORCE;\ type LUT_MEM_PWR_FORCE + struct dcn2_dpp_shift { TF_REG_FIELD_LIST_DCN2_0(uint8_t); }; @@ -691,11 +705,6 @@ void dpp2_set_hdr_multiplier( struct dpp *dpp_base, uint32_t multiplier); -bool dpp2_get_optimal_number_of_taps( - struct dpp *dpp, - struct scaler_data *scl_data, - const struct scaling_taps *in_taps); - bool dpp2_construct(struct dcn20_dpp *dpp2, struct dc_context *ctx, uint32_t inst, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c index 1b419407af94..63eb377ed9c0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c @@ -118,7 +118,7 @@ static void dsc2_get_enc_caps(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock dsc_enc_caps->color_formats.bits.RGB = 1; dsc_enc_caps->color_formats.bits.YCBCR_444 = 1; - dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 0; + dsc_enc_caps->color_formats.bits.YCBCR_SIMPLE_422 = 1; dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_422 = 0; dsc_enc_caps->color_formats.bits.YCBCR_NATIVE_420 = 1; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c index cd8bc92ce3ba..880954ac0b02 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dwb_scl.c @@ -722,7 +722,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20, struct scaling_taps num_taps) { uint32_t h_ratio_luma = 1; - uint32_t h_ratio_chroma = 1; uint32_t h_taps_luma = num_taps.h_taps; uint32_t h_taps_chroma = num_taps.h_taps_c; int32_t h_init_phase_luma = 0; @@ -747,7 +746,6 @@ bool dwb_program_horz_scalar(struct dcn20_dwbc *dwbc20, h_ratio_luma = -1; else h_ratio_luma = dc_fixpt_u3d19(tmp_h_ratio_luma) << 5; - h_ratio_chroma = h_ratio_luma * 2; /*Program ratio*/ REG_UPDATE(WBSCL_HORZ_FILTER_SCALE_RATIO, WBSCL_H_SCALE_RATIO, h_ratio_luma); @@ -803,7 +801,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20, enum dwb_subsample_position subsample_position) { uint32_t v_ratio_luma = 1; - uint32_t v_ratio_chroma = 1; uint32_t v_taps_luma = num_taps.v_taps; uint32_t 
v_taps_chroma = num_taps.v_taps_c; int32_t v_init_phase_luma = 0; @@ -827,7 +824,6 @@ bool dwb_program_vert_scalar(struct dcn20_dwbc *dwbc20, v_ratio_luma = -1; else v_ratio_luma = dc_fixpt_u3d19(tmp_v_ratio_luma) << 5; - v_ratio_chroma = v_ratio_luma * 2; /*Program ratio*/ REG_UPDATE(WBSCL_VERT_FILTER_SCALE_RATIO, WBSCL_V_SCALE_RATIO, v_ratio_luma); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c index b83c022e2c6f..8b8438566101 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c @@ -186,14 +186,13 @@ static void hubbub2_get_blk256_size(unsigned int *blk256_width, unsigned int *bl } static void hubbub2_det_request_size( + unsigned int detile_buf_size, unsigned int height, unsigned int width, unsigned int bpe, bool *req128_horz_wc, bool *req128_vert_wc) { - unsigned int detile_buf_size = 164 * 1024; /* 164KB for DCN1.0 */ - unsigned int blk256_height = 0; unsigned int blk256_width = 0; unsigned int swath_bytes_horz_wc, swath_bytes_vert_wc; @@ -236,7 +235,8 @@ bool hubbub2_get_dcc_compression_cap(struct hubbub *hubbub, &segment_order_horz, &segment_order_vert)) return false; - hubbub2_det_request_size(input->surface_size.height, input->surface_size.width, + hubbub2_det_request_size(TO_DCN20_HUBBUB(hubbub)->detile_buf_size, + input->surface_size.height, input->surface_size.width, bpe, &req128_horz_wc, &req128_vert_wc); if (!req128_horz_wc && !req128_vert_wc) { @@ -588,7 +588,7 @@ static void hubbub2_program_watermarks( DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz); REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180); - hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); + hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter); } static const struct hubbub_funcs hubbub2_funcs = { @@ -600,7 +600,8 @@ static const struct hubbub_funcs hubbub2_funcs = { .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, .wm_read_state = hubbub2_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, - .program_watermarks = hubbub2_program_watermarks + .program_watermarks = hubbub2_program_watermarks, + .allow_self_refresh_control = hubbub1_allow_self_refresh_control }; void hubbub2_construct(struct dcn20_hubbub *hubbub, @@ -618,4 +619,5 @@ void hubbub2_construct(struct dcn20_hubbub *hubbub, hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0xB; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h index 626117d3b4e9..501532dd523a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.h @@ -81,6 +81,7 @@ struct dcn20_hubbub { unsigned int debug_test_index_pstate; struct dcn_watermark_set watermarks; struct dcn20_vmid vmid[16]; + unsigned int detile_buf_size; }; void hubbub2_construct(struct dcn20_hubbub *hubbub, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index 1212da12c414..921a36668ced 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -688,7 +688,7 @@ bool dcn20_set_output_transfer_func(struct pipe_ctx *pipe_ctx, return true; } -static bool dcn20_set_blend_lut( +bool dcn20_set_blend_lut( struct pipe_ctx *pipe_ctx, 
const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; @@ -710,7 +710,7 @@ static bool dcn20_set_blend_lut( return result; } -static bool dcn20_set_shaper_3dlut( +bool dcn20_set_shaper_3dlut( struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state) { struct dpp *dpp_base = pipe_ctx->plane_res.dpp; @@ -999,72 +999,6 @@ void dcn20_enable_plane( } -static void dcn20_program_pipe( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context) -{ - pipe_ctx->plane_state->update_flags.bits.full_update = - context->commit_hints.full_update_needed ? 1 : pipe_ctx->plane_state->update_flags.bits.full_update; - - if (pipe_ctx->plane_state->update_flags.bits.full_update) - dcn20_enable_plane(dc, pipe_ctx, context); - - update_dchubp_dpp(dc, pipe_ctx, context); - - set_hdr_multiplier(pipe_ctx); - - if (pipe_ctx->plane_state->update_flags.bits.full_update || - pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || - pipe_ctx->plane_state->update_flags.bits.gamma_change) - dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); - - /* dcn10_translate_regamma_to_hw_format takes 750us to finish - * only do gamma programming for full update. - * TODO: This can be further optimized/cleaned up - * Always call this for now since it does memcmp inside before - * doing heavy calculation and programming - */ - if (pipe_ctx->plane_state->update_flags.bits.full_update) - dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); -} - -static void dcn20_program_all_pipe_in_tree( - struct dc *dc, - struct pipe_ctx *pipe_ctx, - struct dc_state *context) -{ - if (pipe_ctx->top_pipe == NULL && !pipe_ctx->prev_odm_pipe) { - bool blank = !is_pipe_tree_visible(pipe_ctx); - - pipe_ctx->stream_res.tg->funcs->program_global_sync( - pipe_ctx->stream_res.tg, - pipe_ctx->pipe_dlg_param.vready_offset, - pipe_ctx->pipe_dlg_param.vstartup_start, - pipe_ctx->pipe_dlg_param.vupdate_offset, - pipe_ctx->pipe_dlg_param.vupdate_width); - - pipe_ctx->stream_res.tg->funcs->set_vtg_params( - pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); - - dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); - - if (dc->hwss.update_odm) - dc->hwss.update_odm(dc, context, pipe_ctx); - } - - if (pipe_ctx->plane_state != NULL) - dcn20_program_pipe(dc, pipe_ctx, context); - - if (pipe_ctx->bottom_pipe != NULL) { - ASSERT(pipe_ctx->bottom_pipe != pipe_ctx); - dcn20_program_all_pipe_in_tree(dc, pipe_ctx->bottom_pipe, context); - } else if (pipe_ctx->next_odm_pipe != NULL) { - ASSERT(pipe_ctx->next_odm_pipe != pipe_ctx); - dcn20_program_all_pipe_in_tree(dc, pipe_ctx->next_odm_pipe, context); - } -} - void dcn20_pipe_control_lock_global( struct dc *dc, struct pipe_ctx *pipe, @@ -1124,114 +1058,456 @@ void dcn20_pipe_control_lock( } } -static void dcn20_apply_ctx_for_surface( - struct dc *dc, - const struct dc_stream_state *stream, - int num_planes, - struct dc_state *context) +static void dcn20_detect_pipe_changes(struct pipe_ctx *old_pipe, struct pipe_ctx *new_pipe) { - const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; - int i; - struct timing_generator *tg; - bool removed_pipe[6] = { false }; - bool interdependent_update = false; - struct pipe_ctx *top_pipe_to_program = - find_top_pipe_for_stream(dc, context, stream); - struct pipe_ctx *prev_top_pipe_to_program = - find_top_pipe_for_stream(dc, dc->current_state, stream); - DC_LOGGER_INIT(dc->ctx->logger); + new_pipe->update_flags.raw = 0; - if (!top_pipe_to_program) + /* Exit on unchanged, unused pipe */ + if 
(!old_pipe->plane_state && !new_pipe->plane_state) return; + /* Detect pipe enable/disable */ + if (!old_pipe->plane_state && new_pipe->plane_state) { + new_pipe->update_flags.bits.enable = 1; + new_pipe->update_flags.bits.mpcc = 1; + new_pipe->update_flags.bits.dppclk = 1; + new_pipe->update_flags.bits.hubp_interdependent = 1; + new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + new_pipe->update_flags.bits.gamut_remap = 1; + new_pipe->update_flags.bits.scaler = 1; + new_pipe->update_flags.bits.viewport = 1; + if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { + new_pipe->update_flags.bits.odm = 1; + new_pipe->update_flags.bits.global_sync = 1; + } + return; + } + if (old_pipe->plane_state && !new_pipe->plane_state) { + new_pipe->update_flags.bits.disable = 1; + return; + } - /* Carry over GSL groups in case the context is changing. */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe_ctx = - &dc->current_state->res_ctx.pipe_ctx[i]; - - if (pipe_ctx->stream == stream && - pipe_ctx->stream == old_pipe_ctx->stream) - pipe_ctx->stream_res.gsl_group = - old_pipe_ctx->stream_res.gsl_group; + /* Detect top pipe only changes */ + if (!new_pipe->top_pipe && !new_pipe->prev_odm_pipe) { + /* Detect odm changes */ + if ((old_pipe->next_odm_pipe && new_pipe->next_odm_pipe + && old_pipe->next_odm_pipe->pipe_idx != new_pipe->next_odm_pipe->pipe_idx) + || (!old_pipe->next_odm_pipe && new_pipe->next_odm_pipe) + || (old_pipe->next_odm_pipe && !new_pipe->next_odm_pipe) + || old_pipe->stream_res.opp != new_pipe->stream_res.opp) + new_pipe->update_flags.bits.odm = 1; + + /* Detect global sync changes */ + if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset + || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start + || old_pipe->pipe_dlg_param.vupdate_offset != new_pipe->pipe_dlg_param.vupdate_offset + || old_pipe->pipe_dlg_param.vupdate_width != new_pipe->pipe_dlg_param.vupdate_width) + new_pipe->update_flags.bits.global_sync = 1; } - tg = top_pipe_to_program->stream_res.tg; + /* + * Detect opp / tg change, only set on change, not on enable + * Assume mpcc inst = pipe index, if not this code needs to be updated + * since mpcc is what is affected by these. In fact all of our sequence + * makes this assumption at the moment with how hubp reset is matched to + * same index mpcc reset. 
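+	 *
+	 * All of the detection in this function only compares the old and new
+	 * pipe_ctx and raises bits in new_pipe->update_flags; nothing is
+	 * programmed here.  dcn20_program_pipe() and dcn20_update_dchubp_dpp()
+	 * consume those bits later to decide which blocks to reprogram.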
+ */ + if (old_pipe->stream_res.opp != new_pipe->stream_res.opp) + new_pipe->update_flags.bits.opp_changed = 1; + if (old_pipe->stream_res.tg != new_pipe->stream_res.tg) + new_pipe->update_flags.bits.tg_changed = 1; + + /* Detect mpcc blending changes, only dpp inst and bot matter here */ + if (old_pipe->plane_res.dpp != new_pipe->plane_res.dpp + || old_pipe->stream_res.opp != new_pipe->stream_res.opp + || (!old_pipe->bottom_pipe && new_pipe->bottom_pipe) + || (old_pipe->bottom_pipe && !new_pipe->bottom_pipe) + || (old_pipe->bottom_pipe && new_pipe->bottom_pipe + && old_pipe->bottom_pipe->plane_res.mpcc_inst + != new_pipe->bottom_pipe->plane_res.mpcc_inst)) + new_pipe->update_flags.bits.mpcc = 1; + + /* Detect dppclk change */ + if (old_pipe->plane_res.bw.dppclk_khz != new_pipe->plane_res.bw.dppclk_khz) + new_pipe->update_flags.bits.dppclk = 1; + + /* Check for scl update */ + if (memcmp(&old_pipe->plane_res.scl_data, &new_pipe->plane_res.scl_data, sizeof(struct scaler_data))) + new_pipe->update_flags.bits.scaler = 1; + /* Check for vp update */ + if (memcmp(&old_pipe->plane_res.scl_data.viewport, &new_pipe->plane_res.scl_data.viewport, sizeof(struct rect)) + || memcmp(&old_pipe->plane_res.scl_data.viewport_c, + &new_pipe->plane_res.scl_data.viewport_c, sizeof(struct rect))) + new_pipe->update_flags.bits.viewport = 1; + + /* Detect dlg/ttu/rq updates */ + { + struct _vcs_dpi_display_dlg_regs_st old_dlg_attr = old_pipe->dlg_regs; + struct _vcs_dpi_display_ttu_regs_st old_ttu_attr = old_pipe->ttu_regs; + struct _vcs_dpi_display_dlg_regs_st *new_dlg_attr = &new_pipe->dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *new_ttu_attr = &new_pipe->ttu_regs; + + /* Detect pipe interdependent updates */ + if (old_dlg_attr.dst_y_prefetch != new_dlg_attr->dst_y_prefetch || + old_dlg_attr.vratio_prefetch != new_dlg_attr->vratio_prefetch || + old_dlg_attr.vratio_prefetch_c != new_dlg_attr->vratio_prefetch_c || + old_dlg_attr.dst_y_per_vm_vblank != new_dlg_attr->dst_y_per_vm_vblank || + old_dlg_attr.dst_y_per_row_vblank != new_dlg_attr->dst_y_per_row_vblank || + old_dlg_attr.dst_y_per_vm_flip != new_dlg_attr->dst_y_per_vm_flip || + old_dlg_attr.dst_y_per_row_flip != new_dlg_attr->dst_y_per_row_flip || + old_dlg_attr.refcyc_per_meta_chunk_vblank_l != new_dlg_attr->refcyc_per_meta_chunk_vblank_l || + old_dlg_attr.refcyc_per_meta_chunk_vblank_c != new_dlg_attr->refcyc_per_meta_chunk_vblank_c || + old_dlg_attr.refcyc_per_meta_chunk_flip_l != new_dlg_attr->refcyc_per_meta_chunk_flip_l || + old_dlg_attr.refcyc_per_line_delivery_pre_l != new_dlg_attr->refcyc_per_line_delivery_pre_l || + old_dlg_attr.refcyc_per_line_delivery_pre_c != new_dlg_attr->refcyc_per_line_delivery_pre_c || + old_ttu_attr.refcyc_per_req_delivery_pre_l != new_ttu_attr->refcyc_per_req_delivery_pre_l || + old_ttu_attr.refcyc_per_req_delivery_pre_c != new_ttu_attr->refcyc_per_req_delivery_pre_c || + old_ttu_attr.refcyc_per_req_delivery_pre_cur0 != new_ttu_attr->refcyc_per_req_delivery_pre_cur0 || + old_ttu_attr.refcyc_per_req_delivery_pre_cur1 != new_ttu_attr->refcyc_per_req_delivery_pre_cur1 || + old_ttu_attr.min_ttu_vblank != new_ttu_attr->min_ttu_vblank || + old_ttu_attr.qos_level_flip != new_ttu_attr->qos_level_flip) { + old_dlg_attr.dst_y_prefetch = new_dlg_attr->dst_y_prefetch; + old_dlg_attr.vratio_prefetch = new_dlg_attr->vratio_prefetch; + old_dlg_attr.vratio_prefetch_c = new_dlg_attr->vratio_prefetch_c; + old_dlg_attr.dst_y_per_vm_vblank = new_dlg_attr->dst_y_per_vm_vblank; + old_dlg_attr.dst_y_per_row_vblank = 
new_dlg_attr->dst_y_per_row_vblank; + old_dlg_attr.dst_y_per_vm_flip = new_dlg_attr->dst_y_per_vm_flip; + old_dlg_attr.dst_y_per_row_flip = new_dlg_attr->dst_y_per_row_flip; + old_dlg_attr.refcyc_per_meta_chunk_vblank_l = new_dlg_attr->refcyc_per_meta_chunk_vblank_l; + old_dlg_attr.refcyc_per_meta_chunk_vblank_c = new_dlg_attr->refcyc_per_meta_chunk_vblank_c; + old_dlg_attr.refcyc_per_meta_chunk_flip_l = new_dlg_attr->refcyc_per_meta_chunk_flip_l; + old_dlg_attr.refcyc_per_line_delivery_pre_l = new_dlg_attr->refcyc_per_line_delivery_pre_l; + old_dlg_attr.refcyc_per_line_delivery_pre_c = new_dlg_attr->refcyc_per_line_delivery_pre_c; + old_ttu_attr.refcyc_per_req_delivery_pre_l = new_ttu_attr->refcyc_per_req_delivery_pre_l; + old_ttu_attr.refcyc_per_req_delivery_pre_c = new_ttu_attr->refcyc_per_req_delivery_pre_c; + old_ttu_attr.refcyc_per_req_delivery_pre_cur0 = new_ttu_attr->refcyc_per_req_delivery_pre_cur0; + old_ttu_attr.refcyc_per_req_delivery_pre_cur1 = new_ttu_attr->refcyc_per_req_delivery_pre_cur1; + old_ttu_attr.min_ttu_vblank = new_ttu_attr->min_ttu_vblank; + old_ttu_attr.qos_level_flip = new_ttu_attr->qos_level_flip; + new_pipe->update_flags.bits.hubp_interdependent = 1; + } + /* Detect any other updates to ttu/rq/dlg */ + if (memcmp(&old_dlg_attr, &new_pipe->dlg_regs, sizeof(old_dlg_attr)) || + memcmp(&old_ttu_attr, &new_pipe->ttu_regs, sizeof(old_ttu_attr)) || + memcmp(&old_pipe->rq_regs, &new_pipe->rq_regs, sizeof(old_pipe->rq_regs))) + new_pipe->update_flags.bits.hubp_rq_dlg_ttu = 1; + } +} - interdependent_update = top_pipe_to_program->plane_state && - top_pipe_to_program->plane_state->update_flags.bits.full_update; +static void dcn20_update_dchubp_dpp( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context) +{ + struct hubp *hubp = pipe_ctx->plane_res.hubp; + struct dpp *dpp = pipe_ctx->plane_res.dpp; + struct dc_plane_state *plane_state = pipe_ctx->plane_state; - if (interdependent_update) - lock_all_pipes(dc, context, true); - else - dcn20_pipe_control_lock(dc, top_pipe_to_program, true); + if (pipe_ctx->update_flags.bits.dppclk) + dpp->funcs->dpp_dppclk_control(dpp, false, true); - if (num_planes == 0) { - /* OTG blank before remove all front end */ - dc->hwss.blank_pixel_data(dc, top_pipe_to_program, true); + /* TODO: Need input parameter to tell current DCHUB pipe tie to which OTG + * VTG is within DCHUBBUB which is commond block share by each pipe HUBP. + * VTG is 1:1 mapping with OTG. 
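+	 * Because of that 1:1 mapping, stream_res.tg->inst is what gets handed
+	 * to hubp_vtg_sel() below, after which hubp_setup() programs the
+	 * DLG/TTU/RQ register sets that DML derived for this pipe.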
Each pipe HUBP will select which VTG + */ + if (pipe_ctx->update_flags.bits.hubp_rq_dlg_ttu) { + hubp->funcs->hubp_vtg_sel(hubp, pipe_ctx->stream_res.tg->inst); + + hubp->funcs->hubp_setup( + hubp, + &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs, + &pipe_ctx->rq_regs, + &pipe_ctx->pipe_dlg_param); + } + if (pipe_ctx->update_flags.bits.hubp_interdependent) + hubp->funcs->hubp_setup_interdependent( + hubp, + &pipe_ctx->dlg_regs, + &pipe_ctx->ttu_regs); + + if (pipe_ctx->update_flags.bits.enable || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.input_csc_change || + plane_state->update_flags.bits.color_space_change || + plane_state->update_flags.bits.coeff_reduction_change) { + struct dc_bias_and_scale bns_params = {0}; + + // program the input csc + dpp->funcs->dpp_setup(dpp, + plane_state->format, + EXPANSION_MODE_ZERO, + plane_state->input_csc_color_matrix, + plane_state->color_space, + NULL); + + if (dpp->funcs->dpp_program_bias_and_scale) { + //TODO :for CNVC set scale and bias registers if necessary + dcn10_build_prescale_params(&bns_params, plane_state); + dpp->funcs->dpp_program_bias_and_scale(dpp, &bns_params); + } } - /* Disconnect unused mpcc */ - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe_ctx = - &dc->current_state->res_ctx.pipe_ctx[i]; - /* - * Powergate reused pipes that are not powergated - * fairly hacky right now, using opp_id as indicator - * TODO: After move dc_post to dc_update, this will - * be removed. - */ - if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) { - if (old_pipe_ctx->stream_res.tg == tg && - old_pipe_ctx->plane_res.hubp && - old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID) - dc->hwss.disable_plane(dc, old_pipe_ctx); + if (pipe_ctx->update_flags.bits.mpcc + || plane_state->update_flags.bits.global_alpha_change + || plane_state->update_flags.bits.per_pixel_alpha_change) { + /* Need mpcc to be idle if changing opp */ + if (pipe_ctx->update_flags.bits.opp_changed) { + struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[pipe_ctx->pipe_idx]; + int mpcc_inst; + + for (mpcc_inst = 0; mpcc_inst < MAX_PIPES; mpcc_inst++) { + if (!old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) + continue; + dc->res_pool->mpc->funcs->wait_for_idle(dc->res_pool->mpc, mpcc_inst); + old_pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false; + } } + dc->hwss.update_mpcc(dc, pipe_ctx); + } - if ((!pipe_ctx->plane_state || - pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) && - old_pipe_ctx->plane_state && - old_pipe_ctx->stream_res.tg == tg) { + if (pipe_ctx->update_flags.bits.scaler || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.position_change || + plane_state->update_flags.bits.per_pixel_alpha_change || + pipe_ctx->stream->update_flags.bits.scaling) { + pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->plane_state->per_pixel_alpha; + ASSERT(pipe_ctx->plane_res.scl_data.lb_params.depth == LB_PIXEL_DEPTH_30BPP); + /* scaler configuration */ + pipe_ctx->plane_res.dpp->funcs->dpp_set_scaler( + pipe_ctx->plane_res.dpp, &pipe_ctx->plane_res.scl_data); + } - dc->hwss.plane_atomic_disconnect(dc, old_pipe_ctx); - removed_pipe[i] = true; + if (pipe_ctx->update_flags.bits.viewport || + (context == dc->current_state && plane_state->update_flags.bits.scaling_change) || + (context == dc->current_state && pipe_ctx->stream->update_flags.bits.scaling)) + 
hubp->funcs->mem_program_viewport( + hubp, + &pipe_ctx->plane_res.scl_data.viewport, + &pipe_ctx->plane_res.scl_data.viewport_c); + + /* Any updates are handled in dc interface, just need to apply existing for plane enable */ + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed) + && pipe_ctx->stream->cursor_attributes.address.quad_part != 0) { + dc->hwss.set_cursor_position(pipe_ctx); + dc->hwss.set_cursor_attribute(pipe_ctx); + + if (dc->hwss.set_cursor_sdr_white_level) + dc->hwss.set_cursor_sdr_white_level(pipe_ctx); + } - DC_LOG_DC("Reset mpcc for pipe %d\n", - old_pipe_ctx->pipe_idx); - } + /* Any updates are handled in dc interface, just need + * to apply existing for plane enable / opp change */ + if (pipe_ctx->update_flags.bits.enable || pipe_ctx->update_flags.bits.opp_changed + || pipe_ctx->stream->update_flags.bits.gamut_remap + || pipe_ctx->stream->update_flags.bits.out_csc) { + /* dpp/cm gamut remap*/ + dc->hwss.program_gamut_remap(pipe_ctx); + + /*call the dcn2 method which uses mpc csc*/ + dc->hwss.program_output_csc(dc, + pipe_ctx, + pipe_ctx->stream->output_color_space, + pipe_ctx->stream->csc_color_matrix.matrix, + hubp->opp_id); } - if (num_planes > 0) - dcn20_program_all_pipe_in_tree(dc, top_pipe_to_program, context); + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->update_flags.bits.opp_changed || + plane_state->update_flags.bits.pixel_format_change || + plane_state->update_flags.bits.horizontal_mirror_change || + plane_state->update_flags.bits.rotation_change || + plane_state->update_flags.bits.swizzle_change || + plane_state->update_flags.bits.dcc_change || + plane_state->update_flags.bits.bpp_change || + plane_state->update_flags.bits.scaling_change || + plane_state->update_flags.bits.plane_size_change) { + struct plane_size size = plane_state->plane_size; + + size.surface_size = pipe_ctx->plane_res.scl_data.viewport; + hubp->funcs->hubp_program_surface_config( + hubp, + plane_state->format, + &plane_state->tiling_info, + &size, + plane_state->rotation, + &plane_state->dcc, + plane_state->horizontal_mirror, + 0); + hubp->power_gated = false; + } - /* Program secondary blending tree and writeback pipes */ - if ((stream->num_wb_info > 0) && (dc->hwss.program_all_writeback_pipes_in_tree)) - dc->hwss.program_all_writeback_pipes_in_tree(dc, stream, context); + if (pipe_ctx->update_flags.bits.enable || plane_state->update_flags.bits.addr_update) + dc->hwss.update_plane_addr(dc, pipe_ctx); - if (interdependent_update) - for (i = 0; i < dc->res_pool->pipe_count; i++) { + if (pipe_ctx->update_flags.bits.enable) + hubp->funcs->set_blank(hubp, false); +} + + +static void dcn20_program_pipe( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context) +{ + /* Only need to unblank on top pipe */ + if ((pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.abm_level) + && !pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe) + dc->hwss.blank_pixel_data(dc, pipe_ctx, !pipe_ctx->plane_state->visible); + + if (pipe_ctx->update_flags.bits.global_sync) { + pipe_ctx->stream_res.tg->funcs->program_global_sync( + pipe_ctx->stream_res.tg, + pipe_ctx->pipe_dlg_param.vready_offset, + pipe_ctx->pipe_dlg_param.vstartup_start, + pipe_ctx->pipe_dlg_param.vupdate_offset, + pipe_ctx->pipe_dlg_param.vupdate_width); + + pipe_ctx->stream_res.tg->funcs->set_vtg_params( + pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); + + if (dc->hwss.setup_vupdate_interrupt) + dc->hwss.setup_vupdate_interrupt(pipe_ctx); + } + + if 
(pipe_ctx->update_flags.bits.odm) + dc->hwss.update_odm(dc, context, pipe_ctx); + + if (pipe_ctx->update_flags.bits.enable) + dcn20_enable_plane(dc, pipe_ctx, context); + + if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw) + dcn20_update_dchubp_dpp(dc, pipe_ctx, context); + + if (pipe_ctx->update_flags.bits.enable + || pipe_ctx->plane_state->update_flags.bits.sdr_white_level) + set_hdr_multiplier(pipe_ctx); + + if (pipe_ctx->update_flags.bits.enable || + pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change || + pipe_ctx->plane_state->update_flags.bits.gamma_change) + dc->hwss.set_input_transfer_func(pipe_ctx, pipe_ctx->plane_state); + + /* dcn10_translate_regamma_to_hw_format takes 750us to finish + * only do gamma programming for powering on, internal memcmp to avoid + * updating on slave planes + */ + if (pipe_ctx->update_flags.bits.enable || pipe_ctx->stream->update_flags.bits.out_tf) + dc->hwss.set_output_transfer_func(pipe_ctx, pipe_ctx->stream); + + /* If the pipe has been enabled or has a different opp, we + * should reprogram the fmt. This deals with cases where + * interation between mpc and odm combine on different streams + * causes a different pipe to be chosen to odm combine with. + */ + if (pipe_ctx->update_flags.bits.enable + || pipe_ctx->update_flags.bits.opp_changed) { + + pipe_ctx->stream_res.opp->funcs->opp_set_dyn_expansion( + pipe_ctx->stream_res.opp, + COLOR_SPACE_YCBCR601, + pipe_ctx->stream->timing.display_color_depth, + pipe_ctx->stream->signal); + + pipe_ctx->stream_res.opp->funcs->opp_program_fmt( + pipe_ctx->stream_res.opp, + &pipe_ctx->stream->bit_depth_params, + &pipe_ctx->stream->clamping); + } +} + +static bool does_pipe_need_lock(struct pipe_ctx *pipe) +{ + if ((pipe->plane_state && pipe->plane_state->update_flags.raw) + || pipe->update_flags.raw) + return true; + if (pipe->bottom_pipe) + return does_pipe_need_lock(pipe->bottom_pipe); + + return false; +} + +static void dcn20_program_front_end_for_ctx( + struct dc *dc, + struct dc_state *context) +{ + const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100; + int i; + bool pipe_locked[MAX_PIPES] = {false}; + DC_LOGGER_INIT(dc->ctx->logger); + + /* Carry over GSL groups in case the context is changing. 
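+	 * The loop below copies stream_res.gsl_group from the pipe in the
+	 * current state into the same pipe index of the new context whenever
+	 * that pipe still drives the same stream, so an already assigned GSL
+	 * group survives the context swap.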
*/ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream) + context->res_ctx.pipe_ctx[i].stream_res.gsl_group = + dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group; + + /* Set pipe update flags and lock pipes */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i], + &context->res_ctx.pipe_ctx[i]); + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (!context->res_ctx.pipe_ctx[i].top_pipe && + does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) { struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; - /* Skip inactive pipes and ones already updated */ - if (!pipe_ctx->stream || pipe_ctx->stream == stream || - !pipe_ctx->plane_state || !tg->funcs->is_tg_enabled(tg)) - continue; + if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, pipe_ctx, true); + if (!pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true); + pipe_locked[i] = true; + } - pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent( - pipe_ctx->plane_res.hubp, - &pipe_ctx->dlg_regs, - &pipe_ctx->ttu_regs); + /* OTG blank before disabling all front ends */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable + && !context->res_ctx.pipe_ctx[i].top_pipe + && !context->res_ctx.pipe_ctx[i].prev_odm_pipe + && context->res_ctx.pipe_ctx[i].stream) + dc->hwss.blank_pixel_data(dc, &context->res_ctx.pipe_ctx[i], true); + + /* Disconnect mpcc */ + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable + || context->res_ctx.pipe_ctx[i].update_flags.bits.opp_changed) { + dc->hwss.plane_atomic_disconnect(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx); } - if (interdependent_update) - lock_all_pipes(dc, context, false); - else - dcn20_pipe_control_lock(dc, top_pipe_to_program, false); + /* + * Program all updated pipes, order matters for mpcc setup. 
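+	 * A pipe's MPC blending position depends on the pipes above it in its
+	 * bottom_pipe chain, so each chain is walked from the top down.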
Start with + * top pipe and program all pipes that follow in order + */ + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && !pipe->top_pipe) { + while (pipe) { + dcn20_program_pipe(dc, pipe, context); + pipe = pipe->bottom_pipe; + } + /* Program secondary blending tree and writeback pipes */ + pipe = &context->res_ctx.pipe_ctx[i]; + if (!pipe->prev_odm_pipe && pipe->stream->num_wb_info > 0 + && (pipe->update_flags.raw || pipe->plane_state->update_flags.raw || pipe->stream->update_flags.raw) + && dc->hwss.program_all_writeback_pipes_in_tree) + dc->hwss.program_all_writeback_pipes_in_tree(dc, pipe->stream, context); + } + } + /* Unlock all locked pipes */ for (i = 0; i < dc->res_pool->pipe_count; i++) - if (removed_pipe[i]) - dcn20_disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); + if (pipe_locked[i]) { + struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i]; + + if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, pipe_ctx, false); + if (!pipe_ctx->update_flags.bits.enable) + dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false); + } + + for (i = 0; i < dc->res_pool->pipe_count; i++) + if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) + dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]); /* * If we are enabling a pipe, we need to wait for pending clear as this is a critical @@ -1239,15 +1515,22 @@ static void dcn20_apply_ctx_for_surface( * will cause HW to perform an "immediate enable" (as opposed to "vsync enable") which * is unsupported on DCN. */ - i = 0; - if (num_planes > 0 && top_pipe_to_program && - (prev_top_pipe_to_program == NULL || prev_top_pipe_to_program->plane_state == NULL)) { - while (i < TIMEOUT_FOR_PIPE_ENABLE_MS && - top_pipe_to_program->plane_res.hubp->funcs->hubp_is_flip_pending(top_pipe_to_program->plane_res.hubp)) { - i += 1; - msleep(1); + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (pipe->plane_state && !pipe->top_pipe && pipe->update_flags.bits.enable) { + struct hubp *hubp = pipe->plane_res.hubp; + int j = 0; + + for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS + && hubp->funcs->hubp_is_flip_pending(hubp); j++) + msleep(1); } } + + /* WA to apply WM setting*/ + if (dc->hwseq->wa.DEGVIDCN21) + dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub); } @@ -1319,8 +1602,12 @@ bool dcn20_update_bandwidth( pipe_ctx->stream_res.tg->funcs->set_vtg_params( pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing); + if (pipe_ctx->prev_odm_pipe == NULL) dc->hwss.blank_pixel_data(dc, pipe_ctx, blank); + + if (dc->hwss.setup_vupdate_interrupt) + dc->hwss.setup_vupdate_interrupt(pipe_ctx); } pipe_ctx->plane_res.hubp->funcs->hubp_setup( @@ -1337,7 +1624,8 @@ bool dcn20_update_bandwidth( static void dcn20_enable_writeback( struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info) + struct dc_writeback_info *wb_info, + struct dc_state *context) { struct dwbc *dwb; struct mcif_wb *mcif_wb; @@ -1354,7 +1642,7 @@ static void dcn20_enable_writeback( optc->funcs->set_dwb_source(optc, wb_info->dwb_pipe_inst); /* set MCIF_WB buffer and arbitration configuration */ mcif_wb->funcs->config_mcif_buf(mcif_wb, &wb_info->mcif_buf_params, wb_info->dwb_params.dest_height); - mcif_wb->funcs->config_mcif_arb(mcif_wb, 
&dc->current_state->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); + mcif_wb->funcs->config_mcif_arb(mcif_wb, &context->bw_ctx.bw.dcn.bw_writeback.mcif_wb_arb[wb_info->dwb_pipe_inst]); /* Enable MCIF_WB */ mcif_wb->funcs->enable_mcif(mcif_wb); /* Enable DWB */ @@ -1702,6 +1990,28 @@ static void dcn20_reset_hw_ctx_wrap( } } +void dcn20_get_mpctree_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color *color) +{ + const struct tg_color pipe_colors[6] = { + {MAX_TG_COLOR_VALUE, 0, 0}, // red + {MAX_TG_COLOR_VALUE, 0, MAX_TG_COLOR_VALUE}, // yellow + {0, MAX_TG_COLOR_VALUE, 0}, // blue + {MAX_TG_COLOR_VALUE / 2, 0, MAX_TG_COLOR_VALUE / 2}, // purple + {0, 0, MAX_TG_COLOR_VALUE}, // green + {MAX_TG_COLOR_VALUE, MAX_TG_COLOR_VALUE * 2 / 3, 0}, // orange + }; + + struct pipe_ctx *top_pipe = pipe_ctx; + + while (top_pipe->top_pipe) { + top_pipe = top_pipe->top_pipe; + } + + *color = pipe_colors[top_pipe->pipe_idx]; +} + static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) { struct hubp *hubp = pipe_ctx->plane_res.hubp; @@ -1719,6 +2029,9 @@ static void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx) } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_SURFACE) { dcn10_get_surface_visual_confirm_color( pipe_ctx, &blnd_cfg.black_color); + } else if (dc->debug.visual_confirm == VISUAL_CONFIRM_MPCTREE) { + dcn20_get_mpctree_visual_confirm_color( + pipe_ctx, &blnd_cfg.black_color); } if (per_pixel_alpha) @@ -1919,8 +2232,10 @@ static void dcn20_enable_stream(struct pipe_ctx *pipe_ctx) link->link_enc->funcs->connect_dig_be_to_fe(link->link_enc, pipe_ctx->stream_res.stream_enc->id, true); - if (link->dc->hwss.program_dmdata_engine) - link->dc->hwss.program_dmdata_engine(pipe_ctx); + if (pipe_ctx->plane_state && pipe_ctx->plane_state->flip_immediate != 1) { + if (link->dc->hwss.program_dmdata_engine) + link->dc->hwss.program_dmdata_engine(pipe_ctx); + } link->dc->hwss.update_info_frame(pipe_ctx); @@ -2095,7 +2410,8 @@ void dcn20_hw_sequencer_construct(struct dc *dc) dc->hwss.program_triplebuffer = dcn20_program_tripleBuffer; dc->hwss.set_input_transfer_func = dcn20_set_input_transfer_func; dc->hwss.set_output_transfer_func = dcn20_set_output_transfer_func; - dc->hwss.apply_ctx_for_surface = dcn20_apply_ctx_for_surface; + dc->hwss.apply_ctx_for_surface = NULL; + dc->hwss.program_front_end_for_ctx = dcn20_program_front_end_for_ctx; dc->hwss.pipe_control_lock = dcn20_pipe_control_lock; dc->hwss.pipe_control_lock_global = dcn20_pipe_control_lock_global; dc->hwss.optimize_bandwidth = dcn20_optimize_bandwidth; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h index 92ab3dd91814..3098f1049ed7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h @@ -96,4 +96,20 @@ void dcn20_init_blank( struct dc *dc, struct timing_generator *tg); void dcn20_display_init(struct dc *dc); +void dcn20_pipe_control_lock( + struct dc *dc, + struct pipe_ctx *pipe, + bool lock); +void dcn20_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx); +void dcn20_enable_plane( + struct dc *dc, + struct pipe_ctx *pipe_ctx, + struct dc_state *context); +bool dcn20_set_blend_lut( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +bool dcn20_set_shaper_3dlut( + struct pipe_ctx *pipe_ctx, const struct dc_plane_state *plane_state); +void dcn20_get_mpctree_visual_confirm_color( + struct pipe_ctx *pipe_ctx, + struct tg_color 
*color); #endif /* __DC_HWSS_DCN20_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h index 3736b5548a25..0c98a0bbbd14 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_link_encoder.h @@ -91,6 +91,13 @@ struct mpll_cfg { uint32_t ref_range; uint32_t ref_clk; bool hdmimode_enable; + bool sup_pre_hp; + bool dp_tx0_vergdrv_byp; + bool dp_tx1_vergdrv_byp; + bool dp_tx2_vergdrv_byp; + bool dp_tx3_vergdrv_byp; + + }; struct dpcssys_phy_seq_cfg { diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c index 2137e2be2140..3b613fb93ef8 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_optc.c @@ -287,6 +287,10 @@ void optc2_get_optc_source(struct timing_generator *optc, *num_of_src_opp = 2; else *num_of_src_opp = 1; + + /* Work around VBIOS not updating OPTC_NUM_OF_INPUT_SEGMENT */ + if (*src_opp_id_1 == 0xf) + *num_of_src_opp = 1; } void optc2_set_dwb_source(struct timing_generator *optc, @@ -456,7 +460,7 @@ static struct timing_generator_funcs dcn20_tg_funcs = { .set_vtg_params = optc1_set_vtg_params, .program_manual_trigger = optc2_program_manual_trigger, .setup_manual_trigger = optc2_setup_manual_trigger, - .is_matching_timing = optc1_is_matching_timing + .get_hw_timing = optc1_get_hw_timing, }; void dcn20_timing_generator_init(struct optc *optc1) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 6b2f2f1a1c9c..bbd1c98564be 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -581,11 +581,13 @@ static const struct dcn2_dpp_registers tf_regs[] = { }; static const struct dcn2_dpp_shift tf_shift = { - TF_REG_LIST_SH_MASK_DCN20(__SHIFT) + TF_REG_LIST_SH_MASK_DCN20(__SHIFT), + TF_DEBUG_REG_LIST_SH_DCN10 }; static const struct dcn2_dpp_mask tf_mask = { - TF_REG_LIST_SH_MASK_DCN20(_MASK) + TF_REG_LIST_SH_MASK_DCN20(_MASK), + TF_DEBUG_REG_LIST_MASK_DCN10 }; #define dwbc_regs_dcn2(id)\ @@ -732,6 +734,42 @@ static const struct dcn20_vmid_mask vmid_masks = { DCN20_VMID_MASK_SH_LIST(_MASK) }; +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + case TRANSMITTER_UNIPHY_F: + return 5; + break; + default: + ASSERT(0); + return 0; + } +} + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT #define dsc_regsDCN20(id)\ [id] = {\ @@ -825,7 +863,7 @@ static const struct dc_debug_options debug_defaults_drv = { .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_DYNAMIC, - .force_single_disp_pipe_split = true, + .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, @@ -922,7 +960,10 @@ struct dce_aux *dcn20_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * 
AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -1042,14 +1083,18 @@ struct link_encoder *dcn20_link_encoder_create( { struct dcn20_link_encoder *enc20 = kzalloc(sizeof(struct dcn20_link_encoder), GFP_KERNEL); + int link_regs_id; if (!enc20) return NULL; + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + dcn20_link_encoder_construct(enc20, enc_init_data, &link_enc_feature, - &link_enc_regs[enc_init_data->transmitter], + &link_enc_regs[link_regs_id], &link_enc_aux_regs[enc_init_data->channel - 1], &link_enc_hpd_regs[enc_init_data->hpd_source], &le_shift, @@ -1159,6 +1204,8 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .create_hwseq = dcn20_hwseq_create, }; +static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu); + void dcn20_clock_source_destroy(struct clock_source **clk_src) { kfree(TO_DCE110_CLK_SRC(*clk_src)); @@ -1601,7 +1648,7 @@ static void swizzle_to_dml_params( } } -static bool dcn20_split_stream_for_odm( +bool dcn20_split_stream_for_odm( struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *prev_odm_pipe, @@ -1622,7 +1669,6 @@ static bool dcn20_split_stream_for_odm( next_odm_pipe->stream_res.dsc = NULL; #endif if (prev_odm_pipe->next_odm_pipe && prev_odm_pipe->next_odm_pipe != next_odm_pipe) { - ASSERT(!next_odm_pipe->next_odm_pipe); next_odm_pipe->next_odm_pipe = prev_odm_pipe->next_odm_pipe; next_odm_pipe->next_odm_pipe->prev_odm_pipe = next_odm_pipe; } @@ -1679,7 +1725,7 @@ static bool dcn20_split_stream_for_odm( return true; } -static void dcn20_split_stream_for_mpc( +void dcn20_split_stream_for_mpc( struct resource_context *res_ctx, const struct resource_pool *pool, struct pipe_ctx *primary_pipe, @@ -1765,7 +1811,7 @@ int dcn20_populate_dml_pipes_from_context( pipe_cnt = i; continue; } - if (!resource_are_streams_timing_synchronizable( + if (dc->debug.disable_timing_sync || !resource_are_streams_timing_synchronizable( res_ctx->pipe_ctx[pipe_cnt].stream, res_ctx->pipe_ctx[i].stream)) { synchronized_vblank = false; @@ -1897,7 +1943,7 @@ int dcn20_populate_dml_pipes_from_context( break; case PIXEL_ENCODING_YCBCR420: pipes[pipe_cnt].dout.output_format = dm_420; - pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3) / 2; + pipes[pipe_cnt].dout.output_bpp = (output_bpc * 3.0) / 2; break; case PIXEL_ENCODING_YCBCR422: if (true) /* todo */ @@ -1911,6 +1957,11 @@ int dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].dout.output_bpp = output_bpc * 3; } +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + if (res_ctx->pipe_ctx[i].stream->timing.flags.DSC) + pipes[pipe_cnt].dout.output_bpp = res_ctx->pipe_ctx[i].stream->timing.dsc_cfg.bits_per_pixel / 16.0; +#endif + /* todo: default max for now, until there is logic reflecting this in dc*/ pipes[pipe_cnt].dout.output_bpc = 12; /* @@ -2132,7 +2183,7 @@ void dcn20_set_mcif_arb_params( } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) +bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) { int i; @@ -2167,7 +2218,7 @@ static bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx) } #endif -static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, +struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, struct resource_context *res_ctx, const struct resource_pool *pool, const struct pipe_ctx *primary_pipe) @@ -2207,7 +2258,8 @@ static 
struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, */ if (secondary_pipe == NULL) { for (j = dc->res_pool->pipe_count - 1; j >= 0; j--) { - if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL) { + if (dc->current_state->res_ctx.pipe_ctx[j].top_pipe == NULL + && dc->current_state->res_ctx.pipe_ctx[j].prev_odm_pipe == NULL) { preferred_pipe_idx = j; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { @@ -2243,29 +2295,11 @@ static struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, return secondary_pipe; } -bool dcn20_fast_validate_bw( +void dcn20_merge_pipes_for_validate( struct dc *dc, - struct dc_state *context, - display_e2e_pipe_params_st *pipes, - int *pipe_cnt_out, - int *pipe_split_from, - int *vlevel_out) + struct dc_state *context) { - bool out = false; - - int pipe_cnt, i, pipe_idx, vlevel, vlevel_unsplit; - bool odm_capable = context->bw_ctx.dml.ip.odm_capable; - bool force_split = false; -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - bool failed_non_odm_dsc = false; -#endif - int split_threshold = dc->res_pool->pipe_count / 2; - bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; - - - ASSERT(pipes); - if (!pipes) - return false; + int i; /* merge previously split odm pipes since mode support needs to make the decision */ for (i = 0; i < dc->res_pool->pipe_count; i++) { @@ -2320,51 +2354,19 @@ bool dcn20_fast_validate_bw( if (pipe->plane_state) resource_build_scaling_params(pipe); } +} - if (dc->res_pool->funcs->populate_dml_pipes) - pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, - &context->res_ctx, pipes); - else - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, - &context->res_ctx, pipes); - - *pipe_cnt_out = pipe_cnt; - - if (!pipe_cnt) { - out = true; - goto validate_out; - } - - context->bw_ctx.dml.ip.odm_capable = 0; - - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - context->bw_ctx.dml.ip.odm_capable = odm_capable; - -#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT - /* 1 dsc per stream dsc validation */ - if (vlevel <= context->bw_ctx.dml.soc.num_states) - if (!dcn20_validate_dsc(dc, context)) { - failed_non_odm_dsc = true; - vlevel = context->bw_ctx.dml.soc.num_states + 1; - } -#endif - - if (vlevel > context->bw_ctx.dml.soc.num_states && odm_capable) - vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); - - if (vlevel > context->bw_ctx.dml.soc.num_states) - goto validate_fail; - - if ((context->stream_count > split_threshold && dc->current_state->stream_count <= split_threshold) - || (context->stream_count <= split_threshold && dc->current_state->stream_count > split_threshold)) - context->commit_hints.full_update_needed = true; - - /*initialize pipe_just_split_from to invalid idx*/ - for (i = 0; i < MAX_PIPES; i++) - pipe_split_from[i] = -1; +int dcn20_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int vlevel, + bool *split) +{ + int i, pipe_idx, vlevel_split; + bool force_split = false; + bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC; - /* Single display only conditionals get set here */ + /* Single display loop, exits if there is more than one display */ for (i = 0; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; bool exit_loop = false; @@ -2391,38 +2393,107 @@ bool dcn20_fast_validate_bw( if (exit_loop) break; } - - if (context->stream_count > split_threshold) + /* TODO: fix dc bugs and remove this split threshold thing */ + if (context->stream_count > dc->res_pool->pipe_count / 
2) avoid_split = true; - vlevel_unsplit = vlevel; + /* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */ + if (avoid_split) { + for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + if (!context->res_ctx.pipe_ctx[i].stream) + continue; + + for (vlevel_split = vlevel; vlevel <= context->bw_ctx.dml.soc.num_states; vlevel++) + if (context->bw_ctx.dml.vba.NoOfDPP[vlevel][0][pipe_idx] == 1) + break; + /* Impossible to not split this pipe */ + if (vlevel > context->bw_ctx.dml.soc.num_states) + vlevel = vlevel_split; + pipe_idx++; + } + context->bw_ctx.dml.vba.maxMpcComb = 0; + } + + /* Split loop sets which pipe should be split based on dml outputs and dc flags */ for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + if (!context->res_ctx.pipe_ctx[i].stream) continue; - for (; vlevel_unsplit <= context->bw_ctx.dml.soc.num_states; vlevel_unsplit++) - if (context->bw_ctx.dml.vba.NoOfDPP[vlevel_unsplit][0][pipe_idx] == 1) - break; + + if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1) + split[i] = true; + if ((pipe->stream->view_format == + VIEW_3D_FORMAT_SIDE_BY_SIDE || + pipe->stream->view_format == + VIEW_3D_FORMAT_TOP_AND_BOTTOM) && + (pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_TOP_AND_BOTTOM || + pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_SIDE_BY_SIDE)) + split[i] = true; + if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { + split[i] = true; + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; + } + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = + context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; + /* Adjust dppclk when split is forced, do not bother with dispclk */ + if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; pipe_idx++; } + return vlevel; +} + +bool dcn20_fast_validate_bw( + struct dc *dc, + struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int *pipe_cnt_out, + int *pipe_split_from, + int *vlevel_out) +{ + bool out = false; + bool split[MAX_PIPES] = { false }; + int pipe_cnt, i, pipe_idx, vlevel; + + ASSERT(pipes); + if (!pipes) + return false; + + dcn20_merge_pipes_for_validate(dc, context); + + pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes); + + *pipe_cnt_out = pipe_cnt; + + if (!pipe_cnt) { + out = true; + goto validate_out; + } + + vlevel = dml_get_voltage_level(&context->bw_ctx.dml, pipes, pipe_cnt); + + if (vlevel > context->bw_ctx.dml.soc.num_states) + goto validate_fail; + + vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split); + + /*initialize pipe_just_split_from to invalid idx*/ + for (i = 0; i < MAX_PIPES; i++) + pipe_split_from[i] = -1; + for (i = 0, pipe_idx = -1; i < dc->res_pool->pipe_count; i++) { struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; - bool need_split = true; - bool need_split3d; if (!pipe->stream || pipe_split_from[i] >= 0) continue; pipe_idx++; - if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) { - force_split = true; - context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] = true; - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = true; - } - if (force_split && 
context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1) - context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2; if (!pipe->top_pipe && !pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); ASSERT(hsplit_pipe); @@ -2440,40 +2511,26 @@ bool dcn20_fast_validate_bw( if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state) continue; - need_split3d = ((pipe->stream->view_format == - VIEW_3D_FORMAT_SIDE_BY_SIDE || - pipe->stream->view_format == - VIEW_3D_FORMAT_TOP_AND_BOTTOM) && - (pipe->stream->timing.timing_3d_format == - TIMING_3D_FORMAT_TOP_AND_BOTTOM || - pipe->stream->timing.timing_3d_format == - TIMING_3D_FORMAT_SIDE_BY_SIDE)); - - if (avoid_split && vlevel_unsplit <= context->bw_ctx.dml.soc.num_states && !force_split && !need_split3d) { - need_split = false; - vlevel = vlevel_unsplit; - context->bw_ctx.dml.vba.maxMpcComb = 0; - } else - need_split = context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 2; - /* We do not support mpo + odm at the moment */ if (hsplit_pipe && hsplit_pipe->plane_state != pipe->plane_state && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) goto validate_fail; - if (need_split3d || need_split || force_split) { + if (split[i]) { if (!hsplit_pipe || hsplit_pipe->plane_state != pipe->plane_state) { /* pipe not split previously needs split */ hsplit_pipe = dcn20_find_secondary_pipe(dc, &context->res_ctx, dc->res_pool, pipe); - ASSERT(hsplit_pipe || force_split); - if (!hsplit_pipe) + ASSERT(hsplit_pipe); + if (!hsplit_pipe) { + context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] *= 2; continue; - + } if (context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]) { if (!dcn20_split_stream_for_odm( &context->res_ctx, dc->res_pool, pipe, hsplit_pipe)) goto validate_fail; + dcn20_build_mapped_resource(dc, context, pipe->stream); } else dcn20_split_stream_for_mpc( &context->res_ctx, dc->res_pool, @@ -2487,7 +2544,7 @@ bool dcn20_fast_validate_bw( } #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT /* Actual dsc count per stream dsc validation*/ - if (failed_non_odm_dsc && !dcn20_validate_dsc(dc, context)) { + if (!dcn20_validate_dsc(dc, context)) { context->bw_ctx.dml.vba.ValidationStatus[context->bw_ctx.dml.vba.soc.num_states] = DML_FAIL_DSC_VALIDATION_FAILURE; goto validate_fail; @@ -2506,7 +2563,7 @@ validate_out: return out; } -void dcn20_calculate_wm( +static void dcn20_calculate_wm( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int *out_pipe_cnt, @@ -2527,7 +2584,7 @@ void dcn20_calculate_wm( context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx]; if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_idx] == pipe_idx) pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx]; + context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx]; else pipes[pipe_cnt].pipe.dest.odm_combine = 0; pipe_idx++; @@ -2536,7 +2593,7 @@ void dcn20_calculate_wm( context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_split_from[i]]; if (context->bw_ctx.dml.vba.BlendingAndTiming[pipe_split_from[i]] == pipe_split_from[i]) pipes[pipe_cnt].pipe.dest.odm_combine = - context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_split_from[i]]; + 
context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_split_from[i]]; else pipes[pipe_cnt].pipe.dest.odm_combine = 0; } @@ -2579,6 +2636,11 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.b.urgent_latency_ns = get_urgent_latency(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#endif if (vlevel < 2) { pipes[0].clks_cfg.voltage = 2; @@ -2590,6 +2652,10 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.c.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#endif if (vlevel < 3) { pipes[0].clks_cfg.voltage = 3; @@ -2601,6 +2667,10 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.d.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#endif pipes[0].clks_cfg.voltage = vlevel; pipes[0].clks_cfg.dcfclk_mhz = context->bw_ctx.dml.soc.clock_limits[vlevel].dcfclk_mhz; @@ -2610,6 +2680,10 @@ void dcn20_calculate_wm( context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = get_wm_stutter_exit(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = get_wm_dram_clock_change(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = get_wm_memory_trip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; + context->bw_ctx.bw.dcn.watermarks.a.frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(&context->bw_ctx.dml, pipes, pipe_cnt) * 1000; 
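+	/*
+	 * Each watermark set is filled the same way: set c forces
+	 * pipes[0].clks_cfg.voltage up to 2 when vlevel is lower, set d forces
+	 * it up to 3, and set a uses the validated vlevel; the urgent, stutter
+	 * and p-state values are then read back from DML for that state.
+	 * get_wm_*() return microseconds, hence the "* 1000" into *_ns fields.
+	 */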
+#endif } void dcn20_calculate_dlg_params( @@ -2629,7 +2703,7 @@ void dcn20_calculate_dlg_params( context->bw_ctx.bw.dcn.clk.socclk_khz = context->bw_ctx.dml.vba.SOCCLK * 1000; context->bw_ctx.bw.dcn.clk.dramclk_khz = context->bw_ctx.dml.vba.DRAMSpeed * 1000 / 16; context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = context->bw_ctx.dml.vba.DCFCLKDeepSleep * 1000; - context->bw_ctx.bw.dcn.clk.fclk_khz = 0; + context->bw_ctx.bw.dcn.clk.fclk_khz = context->bw_ctx.dml.vba.FabricClock * 1000; context->bw_ctx.bw.dcn.clk.p_state_change_support = context->bw_ctx.dml.vba.DRAMClockChangeSupport[vlevel][context->bw_ctx.dml.vba.maxMpcComb] != dm_dram_clock_change_unsupported; @@ -2645,8 +2719,8 @@ void dcn20_calculate_dlg_params( continue; if (!visited[pipe_idx]) { - display_pipe_source_params_st *src = &pipes[pipe_idx_unsplit].pipe.src; - display_pipe_dest_params_st *dst = &pipes[pipe_idx_unsplit].pipe.dest; + display_pipe_source_params_st *src = &pipes[pipe_idx].pipe.src; + display_pipe_dest_params_st *dst = &pipes[pipe_idx].pipe.dest; dst->vstartup_start = context->bw_ctx.dml.vba.VStartup[pipe_idx_unsplit]; dst->vupdate_offset = context->bw_ctx.dml.vba.VUpdateOffsetPix[pipe_idx_unsplit]; @@ -2806,7 +2880,6 @@ bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, ASSERT(false); restore_dml_state: - memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib)); context->bw_ctx.dml.soc.dram_clock_change_latency_us = p_state_latency_us; return voltage_supported; @@ -2892,6 +2965,7 @@ static struct resource_funcs dcn20_res_pool_funcs = { .populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context, .get_default_swizzle_mode = dcn20_get_default_swizzle_mode, .set_mcif_arb_params = dcn20_set_mcif_arb_params, + .populate_dml_pipes = dcn20_populate_dml_pipes_from_context, .find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link }; @@ -2900,8 +2974,6 @@ bool dcn20_dwbc_create(struct dc_context *ctx, struct resource_pool *pool) int i; uint32_t pipe_count = pool->res_cap->num_dwb; - ASSERT(pipe_count > 0); - for (i = 0; i < pipe_count; i++) { struct dcn20_dwbc *dwbc20 = kzalloc(sizeof(struct dcn20_dwbc), GFP_KERNEL); @@ -2947,7 +3019,7 @@ bool dcn20_mmhubbub_create(struct dc_context *ctx, struct resource_pool *pool) return true; } -struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) +static struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); @@ -2962,7 +3034,7 @@ struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx) return pp_smu; } -void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) +static void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) { if (pp_smu && *pp_smu) { kfree(*pp_smu); @@ -2970,7 +3042,7 @@ void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu) } } -static void cap_soc_clocks( +void dcn20_cap_soc_clocks( struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table max_clocks) { @@ -3037,10 +3109,10 @@ static void cap_soc_clocks( } } -static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, +void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states) { - struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES] = {0}; + struct _vcs_dpi_voltage_scaling_st calculated_states[MAX_CLOCK_LIMIT_STATES]; int i; int 
num_calculated_states = 0; int min_dcfclk = 0; @@ -3048,12 +3120,18 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ if (num_states == 0) return; + memset(calculated_states, 0, sizeof(calculated_states)); + if (dc->bb_overrides.min_dcfclk_mhz > 0) min_dcfclk = dc->bb_overrides.min_dcfclk_mhz; - else - // Accounting for SOC/DCF relationship, we can go as high as - // 506Mhz in Vmin. We need to code 507 since SMU will round down to 506. - min_dcfclk = 507; + else { + if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev)) + min_dcfclk = 310; + else + // Accounting for SOC/DCF relationship, we can go as high as + // 506Mhz in Vmin. + min_dcfclk = 506; + } for (i = 0; i < num_states; i++) { int min_fclk_required_by_uclk; @@ -3093,7 +3171,7 @@ static void update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_ bb->clock_limits[num_calculated_states].state = bb->num_states; } -static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) +void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) { kernel_fpu_begin(); if ((int)(bb->sr_exit_time_us * 1000) != dc->bb_overrides.sr_exit_time_ns @@ -3292,14 +3370,14 @@ static bool init_soc_bounding_box(struct dc *dc, } if (clock_limits_available && uclk_states_available && num_states) - update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); + dcn20_update_bounding_box(dc, loaded_bb, &max_clocks, uclk_states, num_states); else if (clock_limits_available) - cap_soc_clocks(loaded_bb, max_clocks); + dcn20_cap_soc_clocks(loaded_bb, max_clocks); } loaded_ip->max_num_otg = pool->base.res_cap->num_timing_generator; loaded_ip->max_num_dpp = pool->base.pipe_count; - patch_bounding_box(dc, loaded_bb); + dcn20_patch_bounding_box(dc, loaded_bb); return true; } @@ -3345,6 +3423,7 @@ static bool construct( dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; dc->caps.hw_3d_lut = true; + dc->caps.extended_aux_timeout_support = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) { dc->debug = debug_defaults_drv; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h index 44f95aa0d61e..fef473d68a4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h @@ -95,9 +95,12 @@ struct display_stream_compressor *dcn20_dsc_create( struct dc_context *ctx, uint32_t inst); void dcn20_dsc_destroy(struct display_stream_compressor **dsc); -struct pp_smu_funcs *dcn20_pp_smu_create(struct dc_context *ctx); -void dcn20_pp_smu_destroy(struct pp_smu_funcs **pp_smu); - +void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb); +void dcn20_cap_soc_clocks( + struct _vcs_dpi_soc_bounding_box_st *bb, + struct pp_smu_nv_clock_table max_clocks); +void dcn20_update_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb, + struct pp_smu_nv_clock_table *max_clocks, unsigned int *uclk_states, unsigned int num_states); struct hubp *dcn20_hubp_create( struct dc_context *ctx, uint32_t inst); @@ -116,6 +119,31 @@ void dcn20_set_mcif_arb_params( display_e2e_pipe_params_st *pipes, int pipe_cnt); bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); +void dcn20_merge_pipes_for_validate( + struct dc *dc, + struct dc_state *context); +int dcn20_validate_apply_pipe_split_flags( + struct dc *dc, + struct dc_state *context, + int 
vlevel, + bool *split); +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT +bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx); +#endif +void dcn20_split_stream_for_mpc( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct pipe_ctx *primary_pipe, + struct pipe_ctx *secondary_pipe); +bool dcn20_split_stream_for_odm( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct pipe_ctx *prev_odm_pipe, + struct pipe_ctx *next_odm_pipe); +struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, + struct resource_context *res_ctx, + const struct resource_pool *pool, + const struct pipe_ctx *primary_pipe); bool dcn20_fast_validate_bw( struct dc *dc, struct dc_state *context, diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c index 5ab9d6240498..4b3401616434 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c @@ -578,6 +578,10 @@ static const struct stream_encoder_funcs dcn20_str_enc_funcs = { .set_avmute = enc1_stream_encoder_set_avmute, .dig_connect_to_otg = enc1_dig_connect_to_otg, .dig_source_otg = enc1_dig_source_otg, + + .dp_get_pixel_format = + enc1_stream_encoder_dp_get_pixel_format, + #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .enc_read_state = enc2_read_state, .dp_set_dsc_config = enc2_dp_set_dsc_config, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile index ff50ae71fe27..14113ccf498d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn21/Makefile @@ -1,7 +1,7 @@ # # Makefile for DCN21. -DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o +DCN21 = dcn21_hubp.o dcn21_hubbub.o dcn21_resource.o dcn21_hwseq.o dcn21_link_encoder.o CFLAGS_$(AMDDALPATH)/dc/dcn21/dcn21_resource.o := -mhard-float -msse diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c index d1266741763b..f546260c15b7 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c @@ -22,6 +22,7 @@ * Authors: AMD * */ +#include <linux/delay.h> #include "dm_services.h" #include "dcn20/dcn20_hubbub.h" #include "dcn21_hubbub.h" @@ -51,7 +52,7 @@ #ifdef NUM_VMID #undef NUM_VMID #endif -#define NUM_VMID 1 +#define NUM_VMID 16 static uint32_t convert_and_clamp( uint32_t wm_ns, @@ -71,56 +72,76 @@ static uint32_t convert_and_clamp( void dcn21_dchvm_init(struct hubbub *hubbub) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t riommu_active; + int i; //Init DCHVM block REG_UPDATE(DCHVM_CTRL0, HOSTVM_INIT_REQ, 1); //Poll until RIOMMU_ACTIVE = 1 - //TODO: Figure out interval us and retry count - REG_WAIT(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, 1, 5, 100); + for (i = 0; i < 100; i++) { + REG_GET(DCHVM_RIOMMU_STAT0, RIOMMU_ACTIVE, &riommu_active); - //Reflect the power status of DCHUBBUB - REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); + if (riommu_active) + break; + else + udelay(5); + } + + if (riommu_active) { + //Reflect the power status of DCHUBBUB + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_POWERSTATUS, 1); - //Start rIOMMU prefetching - REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); + //Start rIOMMU prefetching + REG_UPDATE(DCHVM_RIOMMU_CTRL0, HOSTVM_PREFETCH_REQ, 1); - // Enable dynamic clock gating - REG_UPDATE_4(DCHVM_CLK_CTRL, - HVM_DISPCLK_R_GATE_DIS, 0, - 
HVM_DISPCLK_G_GATE_DIS, 0, - HVM_DCFCLK_R_GATE_DIS, 0, - HVM_DCFCLK_G_GATE_DIS, 0); + // Enable dynamic clock gating + REG_UPDATE_4(DCHVM_CLK_CTRL, + HVM_DISPCLK_R_GATE_DIS, 0, + HVM_DISPCLK_G_GATE_DIS, 0, + HVM_DCFCLK_R_GATE_DIS, 0, + HVM_DCFCLK_G_GATE_DIS, 0); - //Poll until HOSTVM_PREFETCH_DONE = 1 - //TODO: Figure out interval us and retry count - REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); + //Poll until HOSTVM_PREFETCH_DONE = 1 + REG_WAIT(DCHVM_RIOMMU_STAT0, HOSTVM_PREFETCH_DONE, 1, 5, 100); + } } -static int hubbub21_init_dchub(struct hubbub *hubbub, +int hubbub21_init_dchub(struct hubbub *hubbub, struct dcn_hubbub_phys_addr_config *pa_config) { struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + struct dcn_vmid_page_table_config phys_config; REG_SET(DCN_VM_FB_LOCATION_BASE, 0, - FB_BASE, pa_config->system_aperture.fb_base); + FB_BASE, pa_config->system_aperture.fb_base >> 24); REG_SET(DCN_VM_FB_LOCATION_TOP, 0, - FB_TOP, pa_config->system_aperture.fb_top); + FB_TOP, pa_config->system_aperture.fb_top >> 24); REG_SET(DCN_VM_FB_OFFSET, 0, - FB_OFFSET, pa_config->system_aperture.fb_offset); + FB_OFFSET, pa_config->system_aperture.fb_offset >> 24); REG_SET(DCN_VM_AGP_BOT, 0, - AGP_BOT, pa_config->system_aperture.agp_bot); + AGP_BOT, pa_config->system_aperture.agp_bot >> 24); REG_SET(DCN_VM_AGP_TOP, 0, - AGP_TOP, pa_config->system_aperture.agp_top); + AGP_TOP, pa_config->system_aperture.agp_top >> 24); REG_SET(DCN_VM_AGP_BASE, 0, - AGP_BASE, pa_config->system_aperture.agp_base); + AGP_BASE, pa_config->system_aperture.agp_base >> 24); + + if (pa_config->gart_config.page_table_start_addr != pa_config->gart_config.page_table_end_addr) { + phys_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr >> 12; + phys_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr >> 12; + phys_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr | 1; //Note: hack + phys_config.depth = 0; + phys_config.block_size = 0; + // Init VMID 0 based on PA config + dcn20_vmid_setup(&hubbub1->vmid[0], &phys_config); + } dcn21_dchvm_init(hubbub); return NUM_VMID; } -static void hubbub21_program_urgent_watermarks( +void hubbub21_program_urgent_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -160,6 +181,13 @@ static void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom); } + if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) { + hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value); + } /* clock state B */ if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) { @@ -192,6 +220,14 @@ static void hubbub21_program_urgent_watermarks( DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom); } + if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) { + hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->b.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value); + } + /* clock state C */ if 
(safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) { hubbub1->watermarks.c.urgent_ns = watermarks->c.urgent_ns; @@ -223,6 +259,14 @@ static void hubbub21_program_urgent_watermarks( DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom); } + if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) { + hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->c.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value); + } + /* clock state D */ if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) { hubbub1->watermarks.d.urgent_ns = watermarks->d.urgent_ns; @@ -253,9 +297,17 @@ static void hubbub21_program_urgent_watermarks( REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0, DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom); } + + if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) { + hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns; + prog_wm_value = convert_and_clamp(watermarks->d.urgent_latency_ns, + refclk_mhz, 0x1fffff); + REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0, + DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value); + } } -static void hubbub21_program_stutter_watermarks( +void hubbub21_program_stutter_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -389,7 +441,7 @@ static void hubbub21_program_stutter_watermarks( } } -static void hubbub21_program_pstate_watermarks( +void hubbub21_program_pstate_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, @@ -564,17 +616,26 @@ void hubbub21_wm_read_state(struct hubbub *hubbub, DCHUBBUB_ARB_ALLOW_DRAM_CLK_CHANGE_WATERMARK_D, &s->dram_clk_chanage); } +void hubbub21_apply_DEDCN21_147_wa(struct hubbub *hubbub) +{ + struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub); + uint32_t prog_wm_value; + + prog_wm_value = REG_READ(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A); + REG_WRITE(DCHUBBUB_ARB_DATA_URGENCY_WATERMARK_A, prog_wm_value); +} static const struct hubbub_funcs hubbub21_funcs = { .update_dchub = hubbub2_update_dchub, .init_dchub_sys_ctx = hubbub21_init_dchub, - .init_vm_ctx = NULL, + .init_vm_ctx = hubbub2_init_vm_ctx, .dcc_support_swizzle = hubbub2_dcc_support_swizzle, .dcc_support_pixel_format = hubbub2_dcc_support_pixel_format, .get_dcc_compression_cap = hubbub2_get_dcc_compression_cap, .wm_read_state = hubbub21_wm_read_state, .get_dchub_ref_freq = hubbub2_get_dchub_ref_freq, .program_watermarks = hubbub21_program_watermarks, + .apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa, }; void hubbub21_construct(struct dcn20_hubbub *hubbub, @@ -592,4 +653,5 @@ void hubbub21_construct(struct dcn20_hubbub *hubbub, hubbub->masks = hubbub_mask; hubbub->debug_test_index_pstate = 0xB; + hubbub->detile_buf_size = 164 * 1024; /* 164KB for DCN2.0 */ } diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h index 6ff3cdb89178..c4840dfb1fa5 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h @@ -36,6 +36,10 @@ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B),\ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C),\ SR(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A),\ + 
SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C),\ + SR(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D),\ SR(DCHUBBUB_ARB_HOSTVM_CNTL), \ SR(DCHVM_CTRL0), \ SR(DCHVM_MEM_CTRL), \ @@ -44,16 +48,9 @@ SR(DCHVM_RIOMMU_STAT0) #define HUBBUB_REG_LIST_DCN21()\ - HUBBUB_REG_LIST_DCN_COMMON(), \ + HUBBUB_REG_LIST_DCN20_COMMON(), \ HUBBUB_SR_WATERMARK_REG_LIST(), \ - HUBBUB_HVM_REG_LIST(), \ - SR(DCHUBBUB_CRC_CTRL), \ - SR(DCN_VM_FB_LOCATION_BASE),\ - SR(DCN_VM_FB_LOCATION_TOP),\ - SR(DCN_VM_FB_OFFSET),\ - SR(DCN_VM_AGP_BOT),\ - SR(DCN_VM_AGP_TOP),\ - SR(DCN_VM_AGP_BASE) + HUBBUB_HVM_REG_LIST() #define HUBBUB_MASK_SH_LIST_HVM(mask_sh) \ HUBBUB_SF(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND_COMMIT_THRESHOLD, mask_sh), \ @@ -102,7 +99,7 @@ HUBBUB_SF(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, mask_sh) #define HUBBUB_MASK_SH_LIST_DCN21(mask_sh)\ - HUBBUB_MASK_SH_LIST_HVM(mask_sh),\ + HUBBUB_MASK_SH_LIST_HVM(mask_sh), \ HUBBUB_MASK_SH_LIST_DCN_COMMON(mask_sh), \ HUBBUB_MASK_SH_LIST_STUTTER(mask_sh), \ HUBBUB_SF(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, mask_sh), \ @@ -114,11 +111,28 @@ HUBBUB_SF(DCN_VM_AGP_BASE, AGP_BASE, mask_sh) void dcn21_dchvm_init(struct hubbub *hubbub); +int hubbub21_init_dchub(struct hubbub *hubbub, + struct dcn_hubbub_phys_addr_config *pa_config); void hubbub21_program_watermarks( struct hubbub *hubbub, struct dcn_watermark_set *watermarks, unsigned int refclk_mhz, bool safe_to_lower); +void hubbub21_program_urgent_watermarks( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +void hubbub21_program_stutter_watermarks( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); +void hubbub21_program_pstate_watermarks( + struct hubbub *hubbub, + struct dcn_watermark_set *watermarks, + unsigned int refclk_mhz, + bool safe_to_lower); void hubbub21_wm_read_state(struct hubbub *hubbub, struct dcn_hubbub_wm *wm); diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c index a00af513aa2b..2f5a5867e674 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c @@ -22,6 +22,8 @@ * Authors: AMD * */ + +#include "dcn10/dcn10_hubp.h" #include "dcn21_hubp.h" #include "dm_services.h" @@ -202,7 +204,7 @@ static struct hubp_funcs dcn21_hubp_funcs = { .hubp_enable_tripleBuffer = hubp2_enable_triplebuffer, .hubp_is_triplebuffer_enabled = hubp2_is_triplebuffer_enabled, .hubp_program_surface_flip_and_addr = hubp2_program_surface_flip_and_addr, - .hubp_program_surface_config = hubp2_program_surface_config, + .hubp_program_surface_config = hubp1_program_surface_config, .hubp_is_flip_pending = hubp1_is_flip_pending, .hubp_setup = hubp21_setup, .hubp_setup_interdependent = hubp2_setup_interdependent, diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c new file mode 100644 index 000000000000..b25215cadf85 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c @@ -0,0 +1,122 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dm_helpers.h" +#include "core_types.h" +#include "resource.h" +#include "dce/dce_hwseq.h" +#include "dcn20/dcn20_hwseq.h" +#include "vmid.h" +#include "reg_helper.h" +#include "hw/clk_mgr.h" + + +#define DC_LOGGER_INIT(logger) + +#define CTX \ + hws->ctx +#define REG(reg)\ + hws->regs->reg + +#undef FN +#define FN(reg_name, field_name) \ + hws->shifts->field_name, hws->masks->field_name + +/* Temporary read settings, future will get values from kmd directly */ +static void mmhub_update_page_table_config(struct dcn_hubbub_phys_addr_config *config, + struct dce_hwseq *hws) +{ + uint32_t page_table_base_hi; + uint32_t page_table_base_lo; + + REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32, + PAGE_DIRECTORY_ENTRY_HI32, &page_table_base_hi); + REG_GET(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32, + PAGE_DIRECTORY_ENTRY_LO32, &page_table_base_lo); + + config->gart_config.page_table_base_addr = ((uint64_t)page_table_base_hi << 32) | page_table_base_lo; + +} + +static int dcn21_init_sys_ctx(struct dce_hwseq *hws, struct dc *dc, struct dc_phy_addr_space_config *pa_config) +{ + struct dcn_hubbub_phys_addr_config config; + + config.system_aperture.fb_top = pa_config->system_aperture.fb_top; + config.system_aperture.fb_offset = pa_config->system_aperture.fb_offset; + config.system_aperture.fb_base = pa_config->system_aperture.fb_base; + config.system_aperture.agp_top = pa_config->system_aperture.agp_top; + config.system_aperture.agp_bot = pa_config->system_aperture.agp_bot; + config.system_aperture.agp_base = pa_config->system_aperture.agp_base; + config.gart_config.page_table_start_addr = pa_config->gart_config.page_table_start_addr; + config.gart_config.page_table_end_addr = pa_config->gart_config.page_table_end_addr; + config.gart_config.page_table_base_addr = pa_config->gart_config.page_table_base_addr; + + mmhub_update_page_table_config(&config, hws); + + return dc->res_pool->hubbub->funcs->init_dchub_sys_ctx(dc->res_pool->hubbub, &config); +} + +// work around for Renoir s0i3, if register is programmed, bypass golden init. 
+ +static bool dcn21_s0i3_golden_init_wa(struct dc *dc) +{ + struct dce_hwseq *hws = dc->hwseq; + uint32_t value = 0; + + value = REG_READ(MICROSECOND_TIME_BASE_DIV); + + return value != 0x00120464; +} + +void dcn21_exit_optimized_pwr_state( + const struct dc *dc, + struct dc_state *context) +{ + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + false); +} + +void dcn21_optimize_pwr_state( + const struct dc *dc, + struct dc_state *context) +{ + dc->clk_mgr->funcs->update_clocks( + dc->clk_mgr, + context, + true); +} + +void dcn21_hw_sequencer_construct(struct dc *dc) +{ + dcn20_hw_sequencer_construct(dc); + dc->hwss.init_sys_ctx = dcn21_init_sys_ctx; + dc->hwss.s0i3_golden_init_wa = dcn21_s0i3_golden_init_wa; + dc->hwss.optimize_pwr_state = dcn21_optimize_pwr_state; + dc->hwss.exit_optimized_pwr_state = dcn21_exit_optimized_pwr_state; +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h new file mode 100644 index 000000000000..be67b62e6fb1 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h @@ -0,0 +1,33 @@ +/* +* Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DC_HWSS_DCN21_H__ +#define __DC_HWSS_DCN21_H__ + +struct dc; + +void dcn21_hw_sequencer_construct(struct dc *dc); + +#endif /* __DC_HWSS_DCN21_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c new file mode 100644 index 000000000000..e8a504ca5890 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.c @@ -0,0 +1,470 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "reg_helper.h" + +#include <linux/delay.h> +#include "core_types.h" +#include "link_encoder.h" +#include "dcn21_link_encoder.h" +#include "stream_encoder.h" + +#include "i2caux_interface.h" +#include "dc_bios_types.h" + +#include "gpio_service_interface.h" + +#define CTX \ + enc10->base.ctx +#define DC_LOGGER \ + enc10->base.ctx->logger + +#define REG(reg)\ + (enc10->link_regs->reg) + +#undef FN +#define FN(reg_name, field_name) \ + enc10->link_shift->field_name, enc10->link_mask->field_name + +#define IND_REG(index) \ + (enc10->link_regs->index) + +static struct mpll_cfg dcn21_mpll_cfg_ref[] = { + // RBR + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 238, + .mpllb_fracn_en = 0, + .mpllb_fracn_quot = 0, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 44237, + .mpllb_ssc_stepsize = 59454, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 2, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 2, + .mpllb_ana_cp_int = 9, + .mpllb_ana_cp_prop = 15, + .hdmi_pixel_clk_div = 0, + }, + // HBR + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 192, + .mpllb_fracn_en = 1, + .mpllb_fracn_quot = 32768, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 36864, + .mpllb_ssc_stepsize = 49545, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 1, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 3, + .mpllb_ana_cp_int = 9, + .mpllb_ana_cp_prop = 15, + .hdmi_pixel_clk_div = 0, + }, + //HBR2 + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 192, + .mpllb_fracn_en = 1, + .mpllb_fracn_quot = 32768, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 36864, + .mpllb_ssc_stepsize = 49545, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 0, + .tx_vboost_lvl = 5, + .mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 3, + .mpllb_ana_cp_int = 9, + .mpllb_ana_cp_prop = 15, + .hdmi_pixel_clk_div = 0, + }, + //HBR3 + { + .hdmimode_enable = 0, + .ref_range = 1, + .ref_clk_mpllb_div = 1, + .mpllb_ssc_en = 1, + .mpllb_div5_clk_en = 1, + .mpllb_multiplier = 304, + .mpllb_fracn_en = 1, + .mpllb_fracn_quot = 49152, + .mpllb_fracn_rem = 0, + .mpllb_fracn_den = 1, + .mpllb_ssc_up_spread = 0, + .mpllb_ssc_peak = 55296, + .mpllb_ssc_stepsize = 74318, + .mpllb_div_clk_en = 0, + .mpllb_div_multiplier = 0, + .mpllb_hdmi_div = 0, + .mpllb_tx_clk_div = 0, + .tx_vboost_lvl = 5, + 
.mpllb_pmix_en = 1, + .mpllb_word_div2_en = 0, + .mpllb_ana_v2i = 2, + .mpllb_ana_freq_vco = 1, + .mpllb_ana_cp_int = 7, + .mpllb_ana_cp_prop = 16, + .hdmi_pixel_clk_div = 0, + }, +}; + + +static bool update_cfg_data( + struct dcn10_link_encoder *enc10, + const struct dc_link_settings *link_settings, + struct dpcssys_phy_seq_cfg *cfg) +{ + int i; + + cfg->load_sram_fw = false; + cfg->use_calibration_setting = true; + + //TODO: need to implement a proper lane mapping for Renoir. + for (i = 0; i < 4; i++) + cfg->lane_en[i] = true; + + switch (link_settings->link_rate) { + case LINK_RATE_LOW: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[0]; + break; + case LINK_RATE_HIGH: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[1]; + break; + case LINK_RATE_HIGH2: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[2]; + break; + case LINK_RATE_HIGH3: + cfg->mpll_cfg = dcn21_mpll_cfg_ref[3]; + break; + default: + DC_LOG_ERROR("%s: No supported link rate found %X!\n", + __func__, link_settings->link_rate); + return false; + } + + return true; +} + +void dcn21_link_encoder_get_max_link_cap(struct link_encoder *enc, + struct dc_link_settings *link_settings) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t value; + + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DP4, &value); + + if (!value && link_settings->lane_count > LANE_COUNT_TWO) + link_settings->lane_count = LANE_COUNT_TWO; +} + +bool dcn21_link_encoder_is_in_alt_mode(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + uint32_t value; + + REG_GET(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DPALT_DISABLE, &value); + + // if value == 1 alt mode is disabled, otherwise it is enabled + return !value; +} + +bool dcn21_link_encoder_acquire_phy(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + int value; + + if (enc->features.flags.bits.DP_IS_USB_C) { + REG_GET(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE, &value); + + if (value == 1) { + ASSERT(0); + return false; + } + REG_UPDATE(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE_ACK, 0); + + udelay(40); + + REG_GET(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE, &value); + if (value == 1) { + ASSERT(0); + REG_UPDATE(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE_ACK, 1); + return false; + } + } + + REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 1); + + return true; +} + + + +static void dcn21_link_encoder_release_phy(struct link_encoder *enc) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + + if (enc->features.flags.bits.DP_IS_USB_C) { + REG_UPDATE(RDPCSTX_PHY_CNTL6, + RDPCS_PHY_DPALT_DISABLE_ACK, 1); + } + + REG_UPDATE(RDPCSTX_PHY_CNTL6, RDPCS_PHY_DP_REF_CLK_EN, 0); + +} + +void dcn21_link_encoder_enable_dp_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source) +{ + struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc); + struct dcn21_link_encoder *enc21 = (struct dcn21_link_encoder *) enc10; + struct dpcssys_phy_seq_cfg *cfg = &enc21->phy_seq_cfg; + + if (!dcn21_link_encoder_acquire_phy(enc)) + return; + + if (!enc->ctx->dc->debug.avoid_vbios_exec_table) { + dcn10_link_encoder_enable_dp_output(enc, link_settings, clock_source); + return; + } + + if (!update_cfg_data(enc10, link_settings, cfg)) + return; + + enc1_configure_encoder(enc10, link_settings); + + dcn10_link_encoder_setup(enc, SIGNAL_TYPE_DISPLAY_PORT); + +} + +void dcn21_link_encoder_enable_dp_mst_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id 
clock_source) +{ + if (!dcn21_link_encoder_acquire_phy(enc)) + return; + + dcn10_link_encoder_enable_dp_mst_output(enc, link_settings, clock_source); +} + +void dcn21_link_encoder_disable_output( + struct link_encoder *enc, + enum signal_type signal) +{ + dcn10_link_encoder_disable_output(enc, signal); + + if (dc_is_dp_signal(signal)) + dcn21_link_encoder_release_phy(enc); +} + + +static const struct link_encoder_funcs dcn21_link_enc_funcs = { +#ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT + .read_state = link_enc2_read_state, +#endif + .validate_output_with_stream = + dcn10_link_encoder_validate_output_with_stream, + .hw_init = enc2_hw_init, + .setup = dcn10_link_encoder_setup, + .enable_tmds_output = dcn10_link_encoder_enable_tmds_output, + .enable_dp_output = dcn21_link_encoder_enable_dp_output, + .enable_dp_mst_output = dcn21_link_encoder_enable_dp_mst_output, + .disable_output = dcn21_link_encoder_disable_output, + .dp_set_lane_settings = dcn10_link_encoder_dp_set_lane_settings, + .dp_set_phy_pattern = dcn10_link_encoder_dp_set_phy_pattern, + .update_mst_stream_allocation_table = + dcn10_link_encoder_update_mst_stream_allocation_table, + .psr_program_dp_dphy_fast_training = + dcn10_psr_program_dp_dphy_fast_training, + .psr_program_secondary_packet = dcn10_psr_program_secondary_packet, + .connect_dig_be_to_fe = dcn10_link_encoder_connect_dig_be_to_fe, + .enable_hpd = dcn10_link_encoder_enable_hpd, + .disable_hpd = dcn10_link_encoder_disable_hpd, + .is_dig_enabled = dcn10_is_dig_enabled, + .destroy = dcn10_link_encoder_destroy, + .fec_set_enable = enc2_fec_set_enable, + .fec_set_ready = enc2_fec_set_ready, + .fec_is_active = enc2_fec_is_active, + .get_dig_frontend = dcn10_get_dig_frontend, + .is_in_alt_mode = dcn21_link_encoder_is_in_alt_mode, + .get_max_link_cap = dcn21_link_encoder_get_max_link_cap, +}; + +void dcn21_link_encoder_construct( + struct dcn21_link_encoder *enc21, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask) +{ + struct bp_encoder_cap_info bp_cap_info = {0}; + const struct dc_vbios_funcs *bp_funcs = init_data->ctx->dc_bios->funcs; + enum bp_result result = BP_RESULT_OK; + struct dcn10_link_encoder *enc10 = &enc21->enc10; + + enc10->base.funcs = &dcn21_link_enc_funcs; + enc10->base.ctx = init_data->ctx; + enc10->base.id = init_data->encoder; + + enc10->base.hpd_source = init_data->hpd_source; + enc10->base.connector = init_data->connector; + + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + + enc10->base.features = *enc_features; + + enc10->base.transmitter = init_data->transmitter; + + /* set the flag to indicate whether driver poll the I2C data pin + * while doing the DP sink detect + */ + +/* if (dal_adapter_service_is_feature_supported(as, + FEATURE_DP_SINK_DETECT_POLL_DATA_PIN)) + enc10->base.features.flags.bits. + DP_SINK_DETECT_POLL_DATA_PIN = true;*/ + + enc10->base.output_signals = + SIGNAL_TYPE_DVI_SINGLE_LINK | + SIGNAL_TYPE_DVI_DUAL_LINK | + SIGNAL_TYPE_LVDS | + SIGNAL_TYPE_DISPLAY_PORT | + SIGNAL_TYPE_DISPLAY_PORT_MST | + SIGNAL_TYPE_EDP | + SIGNAL_TYPE_HDMI_TYPE_A; + + /* For DCE 8.0 and 8.1, by design, UNIPHY is hardwired to DIG_BE. + * SW always assign DIG_FE 1:1 mapped to DIG_FE for non-MST UNIPHY. + * SW assign DIG_FE to non-MST UNIPHY first and MST last. 
So prefer + * DIG is per UNIPHY and used by SST DP, eDP, HDMI, DVI and LVDS. + * Prefer DIG assignment is decided by board design. + * For DCE 8.0, there are only max 6 UNIPHYs, we assume board design + * and VBIOS will filter out 7 UNIPHY for DCE 8.0. + * By this, adding DIGG should not hurt DCE 8.0. + * This will let DCE 8.1 share DCE 8.0 as much as possible + */ + + enc10->link_regs = link_regs; + enc10->aux_regs = aux_regs; + enc10->hpd_regs = hpd_regs; + enc10->link_shift = link_shift; + enc10->link_mask = link_mask; + + switch (enc10->base.transmitter) { + case TRANSMITTER_UNIPHY_A: + enc10->base.preferred_engine = ENGINE_ID_DIGA; + break; + case TRANSMITTER_UNIPHY_B: + enc10->base.preferred_engine = ENGINE_ID_DIGB; + break; + case TRANSMITTER_UNIPHY_C: + enc10->base.preferred_engine = ENGINE_ID_DIGC; + break; + case TRANSMITTER_UNIPHY_D: + enc10->base.preferred_engine = ENGINE_ID_DIGD; + break; + case TRANSMITTER_UNIPHY_E: + enc10->base.preferred_engine = ENGINE_ID_DIGE; + break; + case TRANSMITTER_UNIPHY_F: + enc10->base.preferred_engine = ENGINE_ID_DIGF; + break; + case TRANSMITTER_UNIPHY_G: + enc10->base.preferred_engine = ENGINE_ID_DIGG; + break; + default: + ASSERT_CRITICAL(false); + enc10->base.preferred_engine = ENGINE_ID_UNKNOWN; + } + + /* default to one to mirror Windows behavior */ + enc10->base.features.flags.bits.HDMI_6GB_EN = 1; + + result = bp_funcs->get_encoder_cap_info(enc10->base.ctx->dc_bios, + enc10->base.id, &bp_cap_info); + + /* Override features with DCE-specific values */ + if (result == BP_RESULT_OK) { + enc10->base.features.flags.bits.IS_HBR2_CAPABLE = + bp_cap_info.DP_HBR2_EN; + enc10->base.features.flags.bits.IS_HBR3_CAPABLE = + bp_cap_info.DP_HBR3_EN; + enc10->base.features.flags.bits.HDMI_6GB_EN = bp_cap_info.HDMI_6GB_EN; + enc10->base.features.flags.bits.DP_IS_USB_C = + bp_cap_info.DP_IS_USB_C; + } else { + DC_LOG_WARNING("%s: Failed to get encoder_cap_info from VBIOS with error code %d!\n", + __func__, + result); + } + if (enc10->base.ctx->dc->debug.hdmi20_disable) { + enc10->base.features.flags.bits.HDMI_6GB_EN = 0; + } +} diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h new file mode 100644 index 000000000000..1d7a1a51f13d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_link_encoder.h @@ -0,0 +1,61 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DC_LINK_ENCODER__DCN21_H__ +#define __DC_LINK_ENCODER__DCN21_H__ + +#include "dcn20/dcn20_link_encoder.h" + +struct dcn21_link_encoder { + struct dcn10_link_encoder enc10; + struct dpcssys_phy_seq_cfg phy_seq_cfg; +}; + +#define LINK_ENCODER_MASK_SH_LIST_DCN21(mask_sh)\ + LINK_ENCODER_MASK_SH_LIST_DCN20(mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL0_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL1_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL2_XBAR_SOURCE, mask_sh),\ + LE_SF(UNIPHYA_CHANNEL_XBAR_CNTL, UNIPHY_CHANNEL3_XBAR_SOURCE, mask_sh), \ + SRI(RDPCSTX_PHY_FUSE2, RDPCSTX, id), \ + SRI(RDPCSTX_PHY_FUSE3, RDPCSTX, id), \ + SR(RDPCSTX0_RDPCSTX_SCRATCH) + +void dcn21_link_encoder_enable_dp_output( + struct link_encoder *enc, + const struct dc_link_settings *link_settings, + enum clock_source_id clock_source); + +void dcn21_link_encoder_construct( + struct dcn21_link_encoder *enc21, + const struct encoder_init_data *init_data, + const struct encoder_feature_support *enc_features, + const struct dcn10_link_enc_registers *link_regs, + const struct dcn10_link_enc_aux_registers *aux_regs, + const struct dcn10_link_enc_hpd_registers *hpd_regs, + const struct dcn10_link_enc_shift *link_shift, + const struct dcn10_link_enc_mask *link_mask); + +#endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index de182185fe1f..459bd9a5caed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -23,8 +23,6 @@ * */ -#include <linux/slab.h> - #include "dm_services.h" #include "dc.h" @@ -42,11 +40,11 @@ #include "irq/dcn21/irq_service_dcn21.h" #include "dcn20/dcn20_dpp.h" #include "dcn20/dcn20_optc.h" -#include "dcn20/dcn20_hwseq.h" +#include "dcn21/dcn21_hwseq.h" #include "dce110/dce110_hw_sequencer.h" #include "dcn20/dcn20_opp.h" #include "dcn20/dcn20_dsc.h" -#include "dcn20/dcn20_link_encoder.h" +#include "dcn21/dcn21_link_encoder.h" #include "dcn20/dcn20_stream_encoder.h" #include "dce/dce_clock_source.h" #include "dce/dce_audio.h" @@ -84,8 +82,9 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = { - .gpuvm_enable = 0, - .hostvm_enable = 0, + .odm_capable = 1, + .gpuvm_enable = 1, + .hostvm_enable = 1, .gpuvm_max_page_table_levels = 1, .hostvm_max_page_table_levels = 4, .hostvm_cached_page_table_levels = 2, @@ -205,11 +204,11 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .state = 4, .dcfclk_mhz = 810.0, .fabricclk_mhz = 1600.0, - .dispclk_mhz = 1015.0, - .dppclk_mhz = 1015.0, - .phyclk_mhz = 810.0, + .dispclk_mhz = 1395.0, + .dppclk_mhz = 1285.0, + .phyclk_mhz = 1325.0, .socclk_mhz = 953.0, - .dscclk_mhz = 318.334, + .dscclk_mhz = 489.0, .dram_speed_mts = 4266.0, }, /*Extra state, no dispclk ramping*/ @@ -217,18 +216,18 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .state = 5, .dcfclk_mhz = 810.0, .fabricclk_mhz = 1600.0, - .dispclk_mhz = 1015.0, - .dppclk_mhz = 1015.0, - .phyclk_mhz = 810.0, + .dispclk_mhz = 1395.0, + .dppclk_mhz = 1285.0, + .phyclk_mhz = 1325.0, .socclk_mhz = 953.0, - .dscclk_mhz = 318.334, + .dscclk_mhz = 489.0, .dram_speed_mts = 4266.0, }, }, - .sr_exit_time_us = 9.0, - .sr_enter_plus_exit_time_us = 11.0, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 17.0, .urgent_latency_us = 4.0, .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, @@ -350,6 +349,30 @@ static const 
struct bios_registers bios_regs = { NBIO_SR(BIOS_SCRATCH_6) }; +static const struct dce_dmcu_registers dmcu_regs = { + DMCU_DCN10_REG_LIST() +}; + +static const struct dce_dmcu_shift dmcu_shift = { + DMCU_MASK_SH_LIST_DCN10(__SHIFT) +}; + +static const struct dce_dmcu_mask dmcu_mask = { + DMCU_MASK_SH_LIST_DCN10(_MASK) +}; + +static const struct dce_abm_registers abm_regs = { + ABM_DCN20_REG_LIST() +}; + +static const struct dce_abm_shift abm_shift = { + ABM_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dce_abm_mask abm_mask = { + ABM_MASK_SH_LIST_DCN20(_MASK) +}; + #ifdef CONFIG_DRM_AMD_DC_DMUB static const struct dcn21_dmcub_registers dmcub_regs = { DMCUB_REG_LIST_DCN() @@ -628,6 +651,14 @@ static const struct dcn10_stream_enc_registers stream_enc_regs[] = { stream_enc_regs(4), }; +static const struct dce110_aux_registers_shift aux_shift = { + DCN_AUX_MASK_SH_LIST(__SHIFT) +}; + +static const struct dce110_aux_registers_mask aux_mask = { + DCN_AUX_MASK_SH_LIST(_MASK) +}; + static const struct dcn10_stream_encoder_shift se_shift = { SE_COMMON_MASK_SH_LIST_DCN20(__SHIFT) }; @@ -636,6 +667,11 @@ static const struct dcn10_stream_encoder_mask se_mask = { SE_COMMON_MASK_SH_LIST_DCN20(_MASK) }; +static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu); + +static int dcn21_populate_dml_pipes_from_context( + struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes); + static struct input_pixel_processor *dcn21_ipp_create( struct dc_context *ctx, uint32_t inst) { @@ -683,7 +719,10 @@ static struct dce_aux *dcn21_aux_engine_create( dce110_aux_engine_construct(aux_engine, ctx, inst, SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD, - &aux_engine_regs[inst]); + &aux_engine_regs[inst], + &aux_mask, + &aux_shift, + ctx->dc->caps.extended_aux_timeout_support); return &aux_engine->base; } @@ -726,11 +765,12 @@ static const struct resource_caps res_cap_rn = { .num_timing_generator = 4, .num_opp = 4, .num_video_plane = 4, - .num_audio = 6, // 6 audio endpoints. 4 audio streams + .num_audio = 4, // 4 audio endpoints. 
4 audio streams .num_stream_encoder = 5, .num_pll = 5, // maybe 3 because the last two used for USB-c .num_dwb = 1, .num_ddc = 5, + .num_vmid = 1, #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT .num_dsc = 3, #endif @@ -796,15 +836,15 @@ static const struct dc_debug_options debug_defaults_drv = { .clock_trace = true, .disable_pplib_clock_request = true, .pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP, - .force_single_disp_pipe_split = true, + .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, .performance_trace = false, - .max_downscale_src_width = 5120,/*upto 5K*/ + .max_downscale_src_width = 3840, .disable_pplib_wm_range = false, .scl_reset_length10 = true, .sanity_checks = true, - .disable_48mhz_pwrdwn = true, + .disable_48mhz_pwrdwn = false, }; static const struct dc_debug_options debug_defaults_diags = { @@ -939,7 +979,7 @@ static void destruct(struct dcn21_resource_pool *pool) dcn_dccg_destroy(&pool->base.dccg); if (pool->base.pp_smu != NULL) - dcn20_pp_smu_destroy(&pool->base.pp_smu); + dcn21_pp_smu_destroy(&pool->base.pp_smu); } @@ -969,11 +1009,35 @@ static void calculate_wm_set_for_vlevel( #if defined(CONFIG_DRM_AMD_DC_DCN2_1) wm_set->frac_urg_bw_nom = get_fraction_of_urgent_bandwidth(dml, pipes, pipe_cnt) * 1000; wm_set->frac_urg_bw_flip = get_fraction_of_urgent_bandwidth_imm_flip(dml, pipes, pipe_cnt) * 1000; + wm_set->urgent_latency_ns = get_urgent_latency(dml, pipes, pipe_cnt) * 1000; #endif dml->soc.dram_clock_change_latency_us = dram_clock_change_latency_cached; } +static void patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st *bb) +{ + kernel_fpu_begin(); + if (dc->bb_overrides.sr_exit_time_ns) { + bb->sr_exit_time_us = dc->bb_overrides.sr_exit_time_ns / 1000.0; + } + + if (dc->bb_overrides.sr_enter_plus_exit_time_ns) { + bb->sr_enter_plus_exit_time_us = + dc->bb_overrides.sr_enter_plus_exit_time_ns / 1000.0; + } + + if (dc->bb_overrides.urgent_latency_ns) { + bb->urgent_latency_us = dc->bb_overrides.urgent_latency_ns / 1000.0; + } + + if (dc->bb_overrides.dram_clock_change_latency_ns) { + bb->dram_clock_change_latency_us = + dc->bb_overrides.dram_clock_change_latency_ns / 1000.0; + } + kernel_fpu_end(); +} + void dcn21_calculate_wm( struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, @@ -988,6 +1052,8 @@ void dcn21_calculate_wm( ASSERT(bw_params); + patch_bounding_box(dc, &context->bw_ctx.dml.soc); + for (i = 0, pipe_idx = 0, pipe_cnt = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; @@ -1021,7 +1087,7 @@ void dcn21_calculate_wm( pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, &context->res_ctx, pipes); else - pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, + pipe_cnt = dcn21_populate_dml_pipes_from_context(dc, &context->res_ctx, pipes); } @@ -1271,6 +1337,12 @@ struct display_stream_compressor *dcn21_dsc_create( static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { + /* + TODO: Fix this function to calcualte correct values. + There are known issues with this function currently + that will need to be investigated. Use hardcoded known good values for now. 
+ + struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool); struct clk_limit_table *clk_table = &bw_params->clk_table; int i; @@ -1278,7 +1350,6 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator; dcn2_1_ip.max_num_dpp = pool->base.pipe_count; dcn2_1_soc.num_chans = bw_params->num_channels; - dcn2_1_soc.num_states = 0; for (i = 0; i < clk_table->num_entries; i++) { @@ -1286,10 +1357,11 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz; dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz; dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz; - /* This is probably wrong, TODO: find correct calculation */ dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 16 / 1000; - dcn2_1_soc.num_states++; } + dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - i]; + dcn2_1_soc.num_states = i; + */ } /* Temporary Place holder until we can get them from fuse */ @@ -1317,32 +1389,42 @@ static struct dpm_clocks dummy_clocks = { }; -enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp, +static enum pp_smu_status dummy_set_wm_ranges(struct pp_smu *pp, struct pp_smu_wm_range_sets *ranges) { return PP_SMU_RESULT_OK; } -enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp, +static enum pp_smu_status dummy_get_dpm_clock_table(struct pp_smu *pp, struct dpm_clocks *clock_table) { *clock_table = dummy_clocks; return PP_SMU_RESULT_OK; } -struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx) +static struct pp_smu_funcs *dcn21_pp_smu_create(struct dc_context *ctx) { struct pp_smu_funcs *pp_smu = kzalloc(sizeof(*pp_smu), GFP_KERNEL); - pp_smu->ctx.ver = PP_SMU_VER_RN; + if (!pp_smu) + return pp_smu; + + if (IS_FPGA_MAXIMUS_DC(ctx->dce_environment) || IS_DIAG_DC(ctx->dce_environment)) { + pp_smu->ctx.ver = PP_SMU_VER_RN; + pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table; + pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges; + } else { - pp_smu->rn_funcs.get_dpm_clock_table = dummy_get_dpm_clock_table; - pp_smu->rn_funcs.set_wm_ranges = dummy_set_wm_ranges; + dm_pp_get_funcs(ctx, pp_smu); + + if (pp_smu->ctx.ver != PP_SMU_VER_RN) + pp_smu = memset(pp_smu, 0, sizeof(struct pp_smu_funcs)); + } return pp_smu; } -void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu) +static void dcn21_pp_smu_destroy(struct pp_smu_funcs **pp_smu) { if (pp_smu && *pp_smu) { kfree(*pp_smu); @@ -1400,6 +1482,7 @@ static struct dce_hwseq *dcn21_hwseq_create( hws->regs = &hwseq_reg; hws->shifts = &hwseq_shift; hws->masks = &hwseq_mask; + hws->wa.DEGVIDCN21 = true; } return hws; } @@ -1418,10 +1501,152 @@ static const struct resource_create_funcs res_create_maximus_funcs = { .create_hwseq = dcn21_hwseq_create, }; +static const struct encoder_feature_support link_enc_feature = { + .max_hdmi_deep_color = COLOR_DEPTH_121212, + .max_hdmi_pixel_clock = 600000, + .hdmi_ycbcr420_supported = true, + .dp_ycbcr420_supported = true, + .flags.bits.IS_HBR2_CAPABLE = true, + .flags.bits.IS_HBR3_CAPABLE = true, + .flags.bits.IS_TPS3_CAPABLE = true, + .flags.bits.IS_TPS4_CAPABLE = true +}; + + +#define link_regs(id, phyid)\ +[id] = {\ + LE_DCN10_REG_LIST(id), \ + UNIPHY_DCN2_REG_LIST(phyid), \ + SRI(DP_DPHY_INTERNAL_CTRL, DP, id) \ +} + +static const struct dcn10_link_enc_registers link_enc_regs[] = { + link_regs(0, A), + link_regs(1, 
B), + link_regs(2, C), + link_regs(3, D), + link_regs(4, E), +}; + +#define aux_regs(id)\ +[id] = {\ + DCN2_AUX_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_aux_registers link_enc_aux_regs[] = { + aux_regs(0), + aux_regs(1), + aux_regs(2), + aux_regs(3), + aux_regs(4) +}; + +#define hpd_regs(id)\ +[id] = {\ + HPD_REG_LIST(id)\ +} + +static const struct dcn10_link_enc_hpd_registers link_enc_hpd_regs[] = { + hpd_regs(0), + hpd_regs(1), + hpd_regs(2), + hpd_regs(3), + hpd_regs(4) +}; + +static const struct dcn10_link_enc_shift le_shift = { + LINK_ENCODER_MASK_SH_LIST_DCN20(__SHIFT) +}; + +static const struct dcn10_link_enc_mask le_mask = { + LINK_ENCODER_MASK_SH_LIST_DCN20(_MASK) +}; + +static int map_transmitter_id_to_phy_instance( + enum transmitter transmitter) +{ + switch (transmitter) { + case TRANSMITTER_UNIPHY_A: + return 0; + break; + case TRANSMITTER_UNIPHY_B: + return 1; + break; + case TRANSMITTER_UNIPHY_C: + return 2; + break; + case TRANSMITTER_UNIPHY_D: + return 3; + break; + case TRANSMITTER_UNIPHY_E: + return 4; + break; + default: + ASSERT(0); + return 0; + } +} + +static struct link_encoder *dcn21_link_encoder_create( + const struct encoder_init_data *enc_init_data) +{ + struct dcn21_link_encoder *enc21 = + kzalloc(sizeof(struct dcn21_link_encoder), GFP_KERNEL); + int link_regs_id; + + if (!enc21) + return NULL; + + link_regs_id = + map_transmitter_id_to_phy_instance(enc_init_data->transmitter); + + dcn21_link_encoder_construct(enc21, + enc_init_data, + &link_enc_feature, + &link_enc_regs[link_regs_id], + &link_enc_aux_regs[enc_init_data->channel - 1], + &link_enc_hpd_regs[enc_init_data->hpd_source], + &le_shift, + &le_mask); + + return &enc21->enc10.base; +} +#define CTX ctx + +#define REG(reg_name) \ + (DCN_BASE.instance[0].segment[mm ## reg_name ## _BASE_IDX] + mm ## reg_name) + +static uint32_t read_pipe_fuses(struct dc_context *ctx) +{ + uint32_t value = REG_READ(CC_DC_PIPE_DIS); + /* RV1 support max 4 pipes */ + value = value & 0xf; + return value; +} + +static int dcn21_populate_dml_pipes_from_context( + struct dc *dc, struct resource_context *res_ctx, display_e2e_pipe_params_st *pipes) +{ + uint32_t pipe_cnt = dcn20_populate_dml_pipes_from_context(dc, res_ctx, pipes); + int i; + + for (i = 0; i < dc->res_pool->pipe_count; i++) { + + if (!res_ctx->pipe_ctx[i].stream) + continue; + + pipes[i].pipe.src.hostvm = 1; + pipes[i].pipe.src.gpuvm = 1; + } + + return pipe_cnt; +} + static struct resource_funcs dcn21_res_pool_funcs = { .destroy = dcn21_destroy_resource_pool, - .link_enc_create = dcn20_link_encoder_create, + .link_enc_create = dcn21_link_encoder_create, .validate_bandwidth = dcn21_validate_bandwidth, + .populate_dml_pipes = dcn21_populate_dml_pipes_from_context, .add_stream_to_ctx = dcn20_add_stream_to_ctx, .remove_stream_from_ctx = dcn20_remove_stream_from_ctx, .acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer, @@ -1437,9 +1662,11 @@ static bool construct( struct dc *dc, struct dcn21_resource_pool *pool) { - int i; + int i, j; struct dc_context *ctx = dc->ctx; struct irq_service_init_data init_data; + uint32_t pipe_fuses = read_pipe_fuses(ctx); + uint32_t num_pipes; ctx->dc_bios->regs = &bios_regs; @@ -1457,7 +1684,9 @@ static bool construct( *************************************************/ pool->base.underlay_pipe_index = NO_UNDERLAY_PIPE; - pool->base.pipe_count = 4; + /* max pipe num for ASIC before check pipe fuses */ + pool->base.pipe_count = pool->base.res_cap->num_timing_generator; + dc->caps.max_downscale_ratio = 200; 
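dcn21_populate_dml_pipes_from_context, added in the hunk above, is a thin wrapper: it lets the DCN2.0 helper fill the DML pipe array, then walks the resource pool and flags every pipe that carries a stream as host-VM and GPU-VM backed (matching gpuvm_enable/hostvm_enable = 1 in dcn2_1_ip). The sketch below shows that "delegate, then patch per-pipe flags" shape with invented stand-in types, not the driver's display_e2e_pipe_params_st or resource_context.

    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_PIPES 4

    struct dml_pipe { bool hostvm; bool gpuvm; };
    struct res_ctx  { bool pipe_has_stream[MAX_PIPES]; };

    /* stand-in for the DCN2.0 base implementation */
    static int dcn20_populate(const struct res_ctx *res, struct dml_pipe *pipes)
    {
        int cnt = 0, i;

        (void)pipes;
        for (i = 0; i < MAX_PIPES; i++)
            if (res->pipe_has_stream[i])
                cnt++;
        return cnt;
    }

    static int dcn21_populate(const struct res_ctx *res, struct dml_pipe *pipes)
    {
        int pipe_cnt = dcn20_populate(res, pipes);
        int i;

        /* DCN2.1 runs with host VM and GPU VM enabled on every active pipe */
        for (i = 0; i < MAX_PIPES; i++) {
            if (!res->pipe_has_stream[i])
                continue;
            pipes[i].hostvm = true;
            pipes[i].gpuvm = true;
        }
        return pipe_cnt;
    }

    int main(void)
    {
        struct res_ctx res = { .pipe_has_stream = { true, false, true, false } };
        struct dml_pipe pipes[MAX_PIPES] = { { 0 } };

        printf("pipes: %d, pipe0 hostvm: %d\n",
               dcn21_populate(&res, pipes), pipes[0].hostvm);
        return 0;
    }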
dc->caps.i2c_speed_in_khz = 100; dc->caps.max_cursor_size = 256; @@ -1467,6 +1696,7 @@ static bool construct( dc->caps.max_slave_planes = 1; dc->caps.post_blend_color_processing = true; dc->caps.force_dp_tps4_for_cp2520 = true; + dc->caps.extended_aux_timeout_support = true; if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV) dc->debug = debug_defaults_drv; @@ -1516,6 +1746,26 @@ static bool construct( goto create_fail; } + pool->base.dmcu = dcn20_dmcu_create(ctx, + &dmcu_regs, + &dmcu_shift, + &dmcu_mask); + if (pool->base.dmcu == NULL) { + dm_error("DC: failed to create dmcu!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + + pool->base.abm = dce_abm_create(ctx, + &abm_regs, + &abm_shift, + &abm_mask); + if (pool->base.abm == NULL) { + dm_error("DC: failed to create abm!\n"); + BREAK_TO_DEBUGGER(); + goto create_fail; + } + #ifdef CONFIG_DRM_AMD_DC_DMUB pool->base.dmcub = dcn21_dmcub_create(ctx, &dmcub_regs, @@ -1530,6 +1780,14 @@ static bool construct( pool->base.pp_smu = dcn21_pp_smu_create(ctx); + num_pipes = dcn2_1_ip.max_num_dpp; + + for (i = 0; i < dcn2_1_ip.max_num_dpp; i++) + if (pipe_fuses & 1 << i) + num_pipes--; + dcn2_1_ip.max_num_dpp = num_pipes; + dcn2_1_ip.max_num_otg = num_pipes; + dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21); init_data.ctx = dc->ctx; @@ -1537,8 +1795,15 @@ static bool construct( if (!pool->base.irqs) goto create_fail; + j = 0; /* mem input -> ipp -> dpp -> opp -> TG */ for (i = 0; i < pool->base.pipe_count; i++) { + /* if pipe is disabled, skip instance of HW pipe, + * i.e, skip ASIC register instance + */ + if ((pipe_fuses & (1 << i)) != 0) + continue; + pool->base.hubps[i] = dcn21_hubp_create(ctx, i); if (pool->base.hubps[i] == NULL) { BREAK_TO_DEBUGGER(); @@ -1562,6 +1827,23 @@ static bool construct( "DC: failed to create dpps!\n"); goto create_fail; } + + pool->base.opps[i] = dcn21_opp_create(ctx, i); + if (pool->base.opps[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error( + "DC: failed to create output pixel processor!\n"); + goto create_fail; + } + + pool->base.timing_generators[i] = dcn21_timing_generator_create( + ctx, i); + if (pool->base.timing_generators[i] == NULL) { + BREAK_TO_DEBUGGER(); + dm_error("DC: failed to create tg!\n"); + goto create_fail; + } + j++; } for (i = 0; i < pool->base.res_cap->num_ddc; i++) { @@ -1582,27 +1864,9 @@ static bool construct( pool->base.sw_i2cs[i] = NULL; } - for (i = 0; i < pool->base.res_cap->num_opp; i++) { - pool->base.opps[i] = dcn21_opp_create(ctx, i); - if (pool->base.opps[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error( - "DC: failed to create output pixel processor!\n"); - goto create_fail; - } - } - - for (i = 0; i < pool->base.res_cap->num_timing_generator; i++) { - pool->base.timing_generators[i] = dcn21_timing_generator_create( - ctx, i); - if (pool->base.timing_generators[i] == NULL) { - BREAK_TO_DEBUGGER(); - dm_error("DC: failed to create tg!\n"); - goto create_fail; - } - } - - pool->base.timing_generator_count = i; + pool->base.timing_generator_count = j; + pool->base.pipe_count = j; + pool->base.mpcc_count = j; pool->base.mpc = dcn21_mpc_create(ctx); if (pool->base.mpc == NULL) { @@ -1645,7 +1909,7 @@ static bool construct( &res_create_funcs : &res_create_maximus_funcs))) goto create_fail; - dcn20_hw_sequencer_construct(dc); + dcn21_hw_sequencer_construct(dc); dc->caps.max_planes = pool->base.pipe_count; diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h new file mode 100644 index 000000000000..626d22d437f4 
--- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h @@ -0,0 +1,49 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef DM_CP_PSP_IF__H +#define DM_CP_PSP_IF__H + +struct dc_link; + +struct cp_psp_stream_config { + uint8_t otg_inst; + uint8_t link_enc_inst; + uint8_t stream_enc_inst; + void *dm_stream_ctx; + bool dpms_off; +}; + +struct cp_psp_funcs { + void (*update_stream_config)(void *handle, struct cp_psp_stream_config *config); +}; + +struct cp_psp { + void *handle; + struct cp_psp_funcs funcs; +}; + + +#endif /* DM_CP_PSP_IF__H */ diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index b6b4333737f2..94b75e942607 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -74,7 +74,7 @@ void dm_helpers_dp_mst_clear_payload_allocation_table( /* * Polls for ACT (allocation change trigger) handled and */ -bool dm_helpers_dp_mst_poll_for_allocation_change_trigger( +enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( struct dc_context *ctx, const struct dc_stream_state *stream); /* diff --git a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h index c03a441ee638..ef7df9ef6d7e 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h +++ b/drivers/gpu/drm/amd/display/dc/dm_pp_smu.h @@ -249,10 +249,8 @@ struct pp_smu_funcs_nv { }; #endif -#if defined(CONFIG_DRM_AMD_DC_DCN2_1) - #define PP_SMU_NUM_SOCCLK_DPM_LEVELS 8 -#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 4 +#define PP_SMU_NUM_DCFCLK_DPM_LEVELS 8 #define PP_SMU_NUM_FCLK_DPM_LEVELS 4 #define PP_SMU_NUM_MEMCLK_DPM_LEVELS 4 @@ -288,7 +286,6 @@ struct pp_smu_funcs_rn { enum pp_smu_status (*get_dpm_clock_table) (struct pp_smu *pp, struct dpm_clocks *clock_table); }; -#endif struct pp_smu_funcs { struct pp_smu ctx; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c index 0fafd693ffb4..3c70dd577292 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c @@ -38,6 +38,7 @@ #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN20_MAX_DSC_IMAGE_WIDTH 5184 static double adjust_ReturnBW( struct display_mode_lib *mode_lib, @@ -2610,7 +2611,8 @@ static void 
dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP mode_lib->vba.MinActiveDRAMClockChangeMargin + mode_lib->vba.DRAMClockChangeLatency; - if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 0) { + if (mode_lib->vba.MinActiveDRAMClockChangeMargin > 50) { + mode_lib->vba.DRAMClockChangeWatermark += 25; mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive; } else { if (mode_lib->vba.SynchronizedVBlank || mode_lib->vba.NumberOfActivePlanes == 1) { @@ -3901,6 +3903,10 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode mode_lib->vba.MaximumSwathWidthInLineBuffer); } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown( + mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states], + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + for (j = 0; j < 2; j++) { mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown( mode_lib->vba.MaxDispclk[i], @@ -3925,7 +3931,9 @@ void dml20v2_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + if (mode_lib->vba.ODMCapability == false || + (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown + && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN20_MAX_DSC_IMAGE_WIDTH))) { locals->ODMCombineEnablePerState[i][k] = false; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; } else { diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 878bf4782ce6..2c7455e22a65 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); - // FIXME: take the max between luma, chroma chunk size? + // TODO: take the max between luma, chroma chunk size? // okay for now, as we are setting chunk_bytes to 8kb anyways if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; @@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, unsigned int meta_pitch = 0; unsigned int ppe = mode_422 ? 
2 : 1; - // FIXME check if ppe apply for both luma and chroma in 422 case + // TODO check if ppe apply for both luma and chroma in 422 case if (is_chroma) { vp_width = pipe_src_param.viewport_width_c / ppe; vp_height = pipe_src_param.viewport_height_c; @@ -959,7 +959,7 @@ static void dml20_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); - mode_422 = 0; // FIXME + mode_422 = 0; // TODO access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); @@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1) * (double) cur_req_width; cur_req_per_width = cur_width_ub / (double) cur_req_width; - hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor + hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor if (vratio_pre_l <= 1.0) { *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index ed8bf5f723c9..1e6aeb1bd2bf 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -207,7 +207,7 @@ static void extract_rq_regs(struct display_mode_lib *mode_lib, rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); - // FIXME: take the max between luma, chroma chunk size? + // TODO: take the max between luma, chroma chunk size? // okay for now, as we are setting chunk_bytes to 8kb anyways if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { //32kb rq_regs->drq_expansion_mode = 0; @@ -677,7 +677,7 @@ static void get_surf_rq_param(struct display_mode_lib *mode_lib, unsigned int meta_pitch = 0; unsigned int ppe = mode_422 ? 
2 : 1; - // FIXME check if ppe apply for both luma and chroma in 422 case + // TODO check if ppe apply for both luma and chroma in 422 case if (is_chroma) { vp_width = pipe_src_param.viewport_width_c / ppe; vp_height = pipe_src_param.viewport_height_c; @@ -959,7 +959,7 @@ static void dml20v2_rq_dlg_get_dlg_params(struct display_mode_lib *mode_lib, // Source // dcc_en = src.dcc; dual_plane = is_dual_plane((enum source_format_class)(src->source_format)); - mode_422 = 0; // FIXME + mode_422 = 0; // TODO access_dir = (src->source_scan == dm_vert); // vp access direction: horizontal or vertical accessed // bytes_per_element_l = get_bytes_per_element(source_format_class(src.source_format), 0); // bytes_per_element_c = get_bytes_per_element(source_format_class(src.source_format), 1); @@ -1655,7 +1655,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib, cur_width_ub = dml_ceil((double) cur_src_width / (double) cur_req_width, 1) * (double) cur_req_width; cur_req_per_width = cur_width_ub / (double) cur_req_width; - hactive_cur = (double) cur_src_width / hscl_ratio; // FIXME: oswin to think about what to do for cursor + hactive_cur = (double) cur_src_width / hscl_ratio; // TODO: oswin to think about what to do for cursor if (vratio_pre_l <= 1.0) { *refcyc_per_req_delivery_pre_cur = hactive_cur * ref_freq_to_pix_freq diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c index 3b6ed60dcd35..ba77957aefe3 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_mode_vba_21.c @@ -65,6 +65,7 @@ typedef struct { #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define DCN21_MAX_DSC_IMAGE_WIDTH 5184 static void DisplayPipeConfiguration(struct display_mode_lib *mode_lib); static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerformanceCalculation( @@ -3379,6 +3380,8 @@ static unsigned int TruncToValidBPP( return 30; else if (DecimalBPP >= 24 && (DesiredBPP == 0 || DesiredBPP == 24)) return 24; + else if (DecimalBPP >= 18 && (DesiredBPP == 0 || DesiredBPP == 18)) + return 18; else return BPP_INVALID; } @@ -3936,6 +3939,10 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.MaximumSwathWidthInLineBuffer); } for (i = 0; i <= mode_lib->vba.soc.num_states; i++) { + double MaxMaxDispclkRoundedDown = RoundToDFSGranularityDown( + mode_lib->vba.MaxDispclk[mode_lib->vba.soc.num_states], + mode_lib->vba.DISPCLKDPPCLKVCOSpeed); + for (j = 0; j < 2; j++) { mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity = RoundToDFSGranularityDown( mode_lib->vba.MaxDispclk[i], @@ -3965,7 +3972,9 @@ void dml21_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l && i == mode_lib->vba.soc.num_states) mode_lib->vba.PlaneRequiredDISPCLKWithODMCombine = mode_lib->vba.PixelClock[k] / 2 * (1 + mode_lib->vba.DISPCLKDPPCLKDSCCLKDownSpreading / 100.0); - if (mode_lib->vba.ODMCapability == false || mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine <= mode_lib->vba.MaxDispclkRoundedDownToDFSGranularity) { + if (mode_lib->vba.ODMCapability == false || + (locals->PlaneRequiredDISPCLKWithoutODMCombine <= MaxMaxDispclkRoundedDown + && (!locals->DSCEnabled[k] || locals->HActive[k] <= DCN21_MAX_DSC_IMAGE_WIDTH))) { locals->ODMCombineEnablePerState[i][k] = false; mode_lib->vba.PlaneRequiredDISPCLK = mode_lib->vba.PlaneRequiredDISPCLKWithoutODMCombine; } else { diff --git 
a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index f4c1ef9046bf..cfacd6027467 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -269,7 +269,7 @@ struct writeback_st { struct _vcs_dpi_display_output_params_st { int dp_lanes; - int output_bpp; + double output_bpp; int dsc_enable; int wb_enable; int num_active_wb; @@ -318,6 +318,7 @@ struct _vcs_dpi_display_pipe_dest_params_st { unsigned int vupdate_width; unsigned int vready_offset; unsigned char interlaced; + unsigned char embedded; double pixel_rate_mhz; unsigned char synchronized_vblank_all_planes; unsigned char otg_inst; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 65cf4edddaff..7f9a5621922f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -375,6 +375,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes; + mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded; mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1; mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] = (enum scan_direction_class) (src->source_scan); @@ -432,8 +433,12 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) dst->recout_width; // TODO: or should this be full_recout_width???...maybe only when in hsplit mode? mode_lib->vba.ODMCombineEnabled[mode_lib->vba.NumberOfActivePlanes] = dst->odm_combine; + mode_lib->vba.ODMCombineTypeEnabled[mode_lib->vba.NumberOfActivePlanes] = + dst->odm_combine; mode_lib->vba.OutputFormat[mode_lib->vba.NumberOfActivePlanes] = (enum output_format_class) (dout->output_format); + mode_lib->vba.OutputBpp[mode_lib->vba.NumberOfActivePlanes] = + dout->output_bpp; mode_lib->vba.Output[mode_lib->vba.NumberOfActivePlanes] = (enum output_encoder_class) (dout->output_type); diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 91decac50557..1540ffbe3979 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -387,6 +387,7 @@ struct vba_vars_st { /* vba mode support */ /*inputs*/ + bool EmbeddedPanel[DC__NUM_DPP__MAX]; bool SupportGFX7CompatibleTilingIn32bppAnd64bpp; double MaxHSCLRatio; double MaxVSCLRatio; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index ad8571f5a142..4c3e9cc30167 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -243,7 +243,7 @@ void dml1_extract_rq_regs( rq_regs->rq_regs_l.swath_height = dml_log2(rq_param.dlg.rq_l.swath_height); rq_regs->rq_regs_c.swath_height = dml_log2(rq_param.dlg.rq_c.swath_height); - /* FIXME: take the max between luma, chroma chunk size? + /* TODO: take the max between luma, chroma chunk size? 
* okay for now, as we are setting chunk_bytes to 8kb anyways */ if (rq_param.sizing.rq_l.chunk_bytes >= 32 * 1024) { /*32kb */ @@ -602,7 +602,7 @@ static void get_surf_rq_param( unsigned int log2_dpte_group_length; unsigned int func_meta_row_height, func_dpte_row_height; - /* FIXME check if ppe apply for both luma and chroma in 422 case */ + /* TODO check if ppe apply for both luma and chroma in 422 case */ if (is_chroma) { vp_width = pipe_src_param.viewport_width_c / ppe; vp_height = pipe_src_param.viewport_height_c; @@ -1141,7 +1141,7 @@ void dml1_rq_dlg_get_dlg_params( ASSERT(disp_dlg_regs->refcyc_h_blank_end < (unsigned int) dml_pow(2, 13)); disp_dlg_regs->dlg_vblank_end = interlaced ? (vblank_end / 2) : vblank_end; /* 15 bits */ - prefetch_xy_calc_in_dcfclk = 24.0; /* FIXME: ip_param */ + prefetch_xy_calc_in_dcfclk = 24.0; /* TODO: ip_param */ min_dcfclk_mhz = dlg_sys_param.deepsleep_dcfclk_mhz; t_calc_us = prefetch_xy_calc_in_dcfclk / min_dcfclk_mhz; min_ttu_vblank = dlg_sys_param.t_urg_wm_us; @@ -1182,7 +1182,7 @@ void dml1_rq_dlg_get_dlg_params( dcc_en = e2e_pipe_param.pipe.src.dcc; dual_plane = is_dual_plane( (enum source_format_class) e2e_pipe_param.pipe.src.source_format); - mode_422 = 0; /* FIXME */ + mode_422 = 0; /* TODO */ access_dir = (e2e_pipe_param.pipe.src.source_scan == dm_vert); /* vp access direction: horizontal or vertical accessed */ bytes_per_element_l = get_bytes_per_element( (enum source_format_class) e2e_pipe_param.pipe.src.source_format, @@ -1837,7 +1837,7 @@ void dml1_rq_dlg_get_dlg_params( cur0_width_ub = dml_ceil((double) cur0_src_width / (double) cur0_req_width, 1) * (double) cur0_req_width; cur0_req_per_width = cur0_width_ub / (double) cur0_req_width; - hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* FIXME: oswin to think about what to do for cursor */ + hactive_cur0 = (double) cur0_src_width / hratios_cur0; /* TODO: oswin to think about what to do for cursor */ if (vratio_pre_l <= 1.0) { refcyc_per_req_delivery_pre_cur0 = hactive_cur0 * ref_freq_to_pix_freq diff --git a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c index 5995bcdfed54..e60f760585e4 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/dc_dsc.c @@ -23,8 +23,7 @@ */ #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT -#include "dc.h" -#include "core_types.h" +#include "dc_hw_types.h" #include "dsc.h" #include <drm/drm_dp_helper.h> @@ -47,6 +46,59 @@ const struct dc_dsc_policy dsc_policy = { /* This module's internal functions */ +static uint32_t dc_dsc_bandwidth_in_kbps_from_timing( + const struct dc_crtc_timing *timing) +{ + uint32_t bits_per_channel = 0; + uint32_t kbps; + + if (timing->flags.DSC) { + kbps = (timing->pix_clk_100hz * timing->dsc_cfg.bits_per_pixel); + kbps = kbps / 160 + ((kbps % 160) ? 
1 : 0); + return kbps; + } + + switch (timing->display_color_depth) { + case COLOR_DEPTH_666: + bits_per_channel = 6; + break; + case COLOR_DEPTH_888: + bits_per_channel = 8; + break; + case COLOR_DEPTH_101010: + bits_per_channel = 10; + break; + case COLOR_DEPTH_121212: + bits_per_channel = 12; + break; + case COLOR_DEPTH_141414: + bits_per_channel = 14; + break; + case COLOR_DEPTH_161616: + bits_per_channel = 16; + break; + default: + break; + } + + ASSERT(bits_per_channel != 0); + + kbps = timing->pix_clk_100hz / 10; + kbps *= bits_per_channel; + + if (timing->flags.Y_ONLY != 1) { + /*Only YOnly make reduce bandwidth by 1/3 compares to RGB*/ + kbps *= 3; + if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) + kbps /= 2; + else if (timing->pixel_encoding == PIXEL_ENCODING_YCBCR422) + kbps = kbps * 2 / 3; + } + + return kbps; + +} + static bool dsc_buff_block_size_from_dpcd(int dpcd_buff_block_size, int *buff_block_size) { @@ -178,12 +230,11 @@ static bool dsc_bpp_increment_div_from_dpcd(int bpp_increment_dpcd, uint32_t *bp } static void get_dsc_enc_caps( - const struct dc *dc, + const struct display_stream_compressor *dsc, struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz) { // This is a static HW query, so we can use any DSC - struct display_stream_compressor *dsc = dc->res_pool->dscs[0]; memset(dsc_enc_caps, 0, sizeof(struct dsc_enc_caps)); if (dsc) @@ -290,7 +341,7 @@ static void get_dsc_bandwidth_range( struct dc_dsc_bw_range *range) { /* native stream bandwidth */ - range->stream_kbps = dc_bandwidth_in_kbps_from_timing(timing); + range->stream_kbps = dc_dsc_bandwidth_in_kbps_from_timing(timing); /* max dsc target bpp */ range->max_kbps = dsc_div_by_10_round_up(max_bpp * timing->pix_clk_100hz); @@ -512,6 +563,7 @@ static bool setup_dsc_config( const struct dsc_enc_caps *dsc_enc_caps, int target_bandwidth_kbps, const struct dc_crtc_timing *timing, + int min_slice_height_override, struct dc_dsc_config *dsc_cfg) { struct dsc_enc_caps dsc_common_caps; @@ -680,7 +732,10 @@ static bool setup_dsc_config( // Slice height (i.e. number of slices per column): start with policy and pick the first one that height is divisible by. // For 4:2:0 make sure the slice height is divisible by 2 as well. - slice_height = min(dsc_policy.min_sice_height, pic_height); + if (min_slice_height_override == 0) + slice_height = min(dsc_policy.min_sice_height, pic_height); + else + slice_height = min(min_slice_height_override, pic_height); while (slice_height < pic_height && (pic_height % slice_height != 0 || (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420 && slice_height % 2 != 0))) @@ -802,7 +857,8 @@ bool dc_dsc_parse_dsc_dpcd(const uint8_t *dpcd_dsc_basic_data, const uint8_t *dp * If DSC is not possible, leave '*range' untouched. 
*/ bool dc_dsc_compute_bandwidth_range( - const struct dc *dc, + const struct display_stream_compressor *dsc, + const uint32_t dsc_min_slice_height_override, const uint32_t min_bpp, const uint32_t max_bpp, const struct dsc_dec_dpcd_caps *dsc_sink_caps, @@ -814,16 +870,14 @@ bool dc_dsc_compute_bandwidth_range( struct dsc_enc_caps dsc_common_caps; struct dc_dsc_config config; - get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz); + get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); is_dsc_possible = intersect_dsc_caps(dsc_sink_caps, &dsc_enc_caps, timing->pixel_encoding, &dsc_common_caps); if (is_dsc_possible) - is_dsc_possible = setup_dsc_config(dsc_sink_caps, - &dsc_enc_caps, - 0, - timing, &config); + is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, 0, timing, + dsc_min_slice_height_override, &config); if (is_dsc_possible) get_dsc_bandwidth_range(min_bpp, max_bpp, &dsc_common_caps, timing, range); @@ -832,8 +886,9 @@ bool dc_dsc_compute_bandwidth_range( } bool dc_dsc_compute_config( - const struct dc *dc, + const struct display_stream_compressor *dsc, const struct dsc_dec_dpcd_caps *dsc_sink_caps, + const uint32_t dsc_min_slice_height_override, uint32_t target_bandwidth_kbps, const struct dc_crtc_timing *timing, struct dc_dsc_config *dsc_cfg) @@ -841,11 +896,11 @@ bool dc_dsc_compute_config( bool is_dsc_possible = false; struct dsc_enc_caps dsc_enc_caps; - get_dsc_enc_caps(dc, &dsc_enc_caps, timing->pix_clk_100hz); + get_dsc_enc_caps(dsc, &dsc_enc_caps, timing->pix_clk_100hz); is_dsc_possible = setup_dsc_config(dsc_sink_caps, &dsc_enc_caps, target_bandwidth_kbps, - timing, dsc_cfg); + timing, dsc_min_slice_height_override, dsc_cfg); return is_dsc_possible; } #endif /* CONFIG_DRM_AMD_DC_DSC_SUPPORT */ diff --git a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c index ca51e83f8764..76c4b12d6824 100644 --- a/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dsc/rc_calc.c @@ -177,7 +177,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com { float bpp_group; float initial_xmit_delay_factor; - int source_bpp; int padding_pixels; int i; @@ -217,8 +216,6 @@ void calc_rc_params(struct rc_params *rc, enum colour_mode cm, enum bits_per_com rc->initial_xmit_delay++; } - source_bpp = MODE_SELECT(bpc * 3, bpc * 2, bpc * 1.5); - rc->flatness_min_qp = ((bpc == BPC_8) ? (3) : ((bpc == BPC_10) ? (7) : (11))) - ((minor_version == 1 && cm == CM_444) ? 1 : 0); rc->flatness_max_qp = ((bpc == BPC_8) ? (12) : ((bpc == BPC_10) ? (16) : (20))) - ((minor_version == 1 && cm == CM_444) ? 
1 : 0); rc->flatness_det_thresh = 2 << (bpc - 8); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c index f8f85490e77e..f67c18375bfd 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_base.c @@ -321,8 +321,6 @@ void dal_gpio_destroy( return; } - dal_gpio_close(*gpio); - switch ((*gpio)->id) { case GPIO_ID_DDC_DATA: kfree((*gpio)->hw_container.ddc); diff --git a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c index d03165e71dc6..92280cc05e2d 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/gpio_service.c @@ -169,7 +169,6 @@ void dal_gpio_destroy_generic_mux( return; } - dal_gpio_close(*mux); dal_gpio_destroy(mux); kfree(*mux); @@ -460,7 +459,6 @@ void dal_gpio_destroy_irq( return; } - dal_gpio_close(*irq); dal_gpio_destroy(irq); kfree(*irq); diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/Makefile b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile new file mode 100644 index 000000000000..4170b6eb9ec0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hdcp/Makefile @@ -0,0 +1,28 @@ +# Copyright 2019 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# Makefile for the 'hdcp' sub-component of DAL. +# + +HDCP_MSG = hdcp_msg.o + +AMD_DAL_HDCP_MSG = $(addprefix $(AMDDALPATH)/dc/hdcp/,$(HDCP_MSG)) + +AMD_DISPLAY_FILES += $(AMD_DAL_HDCP_MSG) diff --git a/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c new file mode 100644 index 000000000000..6f730b5bfe42 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/hdcp/hdcp_msg.c @@ -0,0 +1,324 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/slab.h> + +#include "dm_services.h" +#include "dm_helpers.h" +#include "include/hdcp_types.h" +#include "include/i2caux_interface.h" +#include "include/signal_types.h" +#include "core_types.h" +#include "dc_link_ddc.h" +#include "link_hwss.h" + +#define DC_LOGGER \ + link->ctx->logger +#define HDCP14_KSV_SIZE 5 +#define HDCP14_MAX_KSV_FIFO_SIZE 127*HDCP14_KSV_SIZE + +static const bool hdcp_cmd_is_read[] = { + [HDCP_MESSAGE_ID_READ_BKSV] = true, + [HDCP_MESSAGE_ID_READ_RI_R0] = true, + [HDCP_MESSAGE_ID_READ_PJ] = true, + [HDCP_MESSAGE_ID_WRITE_AKSV] = false, + [HDCP_MESSAGE_ID_WRITE_AINFO] = false, + [HDCP_MESSAGE_ID_WRITE_AN] = false, + [HDCP_MESSAGE_ID_READ_VH_X] = true, + [HDCP_MESSAGE_ID_READ_VH_0] = true, + [HDCP_MESSAGE_ID_READ_VH_1] = true, + [HDCP_MESSAGE_ID_READ_VH_2] = true, + [HDCP_MESSAGE_ID_READ_VH_3] = true, + [HDCP_MESSAGE_ID_READ_VH_4] = true, + [HDCP_MESSAGE_ID_READ_BCAPS] = true, + [HDCP_MESSAGE_ID_READ_BSTATUS] = true, + [HDCP_MESSAGE_ID_READ_KSV_FIFO] = true, + [HDCP_MESSAGE_ID_READ_BINFO] = true, + [HDCP_MESSAGE_ID_HDCP2VERSION] = true, + [HDCP_MESSAGE_ID_RX_CAPS] = true, + [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = false, + [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = true, + [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = false, + [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = false, + [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = true, + [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = true, + [HDCP_MESSAGE_ID_WRITE_LC_INIT] = false, + [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = true, + [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = false, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = true, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = false, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = false, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = true, + [HDCP_MESSAGE_ID_READ_RXSTATUS] = true, + [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = false +}; + +static const uint8_t hdcp_i2c_offsets[] = { + [HDCP_MESSAGE_ID_READ_BKSV] = 0x0, + [HDCP_MESSAGE_ID_READ_RI_R0] = 0x8, + [HDCP_MESSAGE_ID_READ_PJ] = 0xA, + [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10, + [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15, + [HDCP_MESSAGE_ID_WRITE_AN] = 0x18, + [HDCP_MESSAGE_ID_READ_VH_X] = 0x20, + [HDCP_MESSAGE_ID_READ_VH_0] = 0x20, + [HDCP_MESSAGE_ID_READ_VH_1] = 0x24, + [HDCP_MESSAGE_ID_READ_VH_2] = 0x28, + [HDCP_MESSAGE_ID_READ_VH_3] = 0x2C, + [HDCP_MESSAGE_ID_READ_VH_4] = 0x30, + [HDCP_MESSAGE_ID_READ_BCAPS] = 0x40, + [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41, + [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43, + [HDCP_MESSAGE_ID_READ_BINFO] = 0xFF, + [HDCP_MESSAGE_ID_HDCP2VERSION] = 0x50, + [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x60, + [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x80, + [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x60, + [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x60, + [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x80, + [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x80, + [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x60, + [HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80, + 
[HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80, + [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x70 +}; + +struct protection_properties { + bool supported; + bool (*process_transaction)( + struct dc_link *link, + struct hdcp_protection_message *message_info); +}; + +static const struct protection_properties non_supported_protection = { + .supported = false +}; + +static bool hdmi_14_process_transaction( + struct dc_link *link, + struct hdcp_protection_message *message_info) +{ + uint8_t *buff = NULL; + bool result; + const uint8_t hdcp_i2c_addr_link_primary = 0x3a; /* 0x74 >> 1*/ + const uint8_t hdcp_i2c_addr_link_secondary = 0x3b; /* 0x76 >> 1*/ + struct i2c_command i2c_command; + uint8_t offset = hdcp_i2c_offsets[message_info->msg_id]; + struct i2c_payload i2c_payloads[] = { + { true, 0, 1, &offset }, + /* actual hdcp payload, will be filled later, zeroed for now*/ + { 0 } + }; + + switch (message_info->link) { + case HDCP_LINK_SECONDARY: + i2c_payloads[0].address = hdcp_i2c_addr_link_secondary; + i2c_payloads[1].address = hdcp_i2c_addr_link_secondary; + break; + case HDCP_LINK_PRIMARY: + default: + i2c_payloads[0].address = hdcp_i2c_addr_link_primary; + i2c_payloads[1].address = hdcp_i2c_addr_link_primary; + break; + } + + if (hdcp_cmd_is_read[message_info->msg_id]) { + i2c_payloads[1].write = false; + i2c_command.number_of_payloads = ARRAY_SIZE(i2c_payloads); + i2c_payloads[1].length = message_info->length; + i2c_payloads[1].data = message_info->data; + } else { + i2c_command.number_of_payloads = 1; + buff = kzalloc(message_info->length + 1, GFP_KERNEL); + + if (!buff) + return false; + + buff[0] = offset; + memmove(&buff[1], message_info->data, message_info->length); + i2c_payloads[0].length = message_info->length + 1; + i2c_payloads[0].data = buff; + } + + i2c_command.payloads = i2c_payloads; + i2c_command.engine = I2C_COMMAND_ENGINE_HW;//only HW + i2c_command.speed = link->ddc->ctx->dc->caps.i2c_speed_in_khz; + + result = dm_helpers_submit_i2c( + link->ctx, + link, + &i2c_command); + kfree(buff); + + return result; +} + +static const struct protection_properties hdmi_14_protection = { + .supported = true, + .process_transaction = hdmi_14_process_transaction +}; + +static const uint32_t hdcp_dpcd_addrs[] = { + [HDCP_MESSAGE_ID_READ_BKSV] = 0x68000, + [HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005, + [HDCP_MESSAGE_ID_READ_PJ] = 0xFFFFFFFF, + [HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007, + [HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B, + [HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c, + [HDCP_MESSAGE_ID_READ_VH_X] = 0x68014, + [HDCP_MESSAGE_ID_READ_VH_0] = 0x68014, + [HDCP_MESSAGE_ID_READ_VH_1] = 0x68018, + [HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c, + [HDCP_MESSAGE_ID_READ_VH_3] = 0x68020, + [HDCP_MESSAGE_ID_READ_VH_4] = 0x68024, + [HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028, + [HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029, + [HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c, + [HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a, + [HDCP_MESSAGE_ID_RX_CAPS] = 0x6921d, + [HDCP_MESSAGE_ID_WRITE_AKE_INIT] = 0x69000, + [HDCP_MESSAGE_ID_READ_AKE_SEND_CERT] = 0x6900b, + [HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM] = 0x69220, + [HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM] = 0x692a0, + [HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME] = 0x692c0, + [HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO] = 0x692e0, + [HDCP_MESSAGE_ID_WRITE_LC_INIT] = 0x692f0, + 
[HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8, + [HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0, + [HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0, + [HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473, + [HDCP_MESSAGE_ID_READ_RXSTATUS] = 0x69493, + [HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE] = 0x69494 +}; + +static bool dpcd_access_helper( + struct dc_link *link, + uint32_t length, + uint8_t *data, + uint32_t dpcd_addr, + bool is_read) +{ + enum dc_status status; + uint32_t cur_length = 0; + uint32_t offset = 0; + uint32_t ksv_read_size = 0x6803b - 0x6802c; + + /* Read KSV, need repeatedly handle */ + if (dpcd_addr == 0x6802c) { + if (length % HDCP14_KSV_SIZE) { + DC_LOG_ERROR("%s: KsvFifo Size(%d) is not a multiple of HDCP14_KSV_SIZE(%d)\n", + __func__, + length, + HDCP14_KSV_SIZE); + } + if (length > HDCP14_MAX_KSV_FIFO_SIZE) { + DC_LOG_ERROR("%s: KsvFifo Size(%d) is greater than HDCP14_MAX_KSV_FIFO_SIZE(%d)\n", + __func__, + length, + HDCP14_MAX_KSV_FIFO_SIZE); + } + + DC_LOG_ERROR("%s: Reading %d Ksv(s) from KsvFifo\n", + __func__, + length / HDCP14_KSV_SIZE); + + while (length > 0) { + if (length > ksv_read_size) { + status = core_link_read_dpcd( + link, + dpcd_addr + offset, + data + offset, + ksv_read_size); + + data += ksv_read_size; + length -= ksv_read_size; + } else { + status = core_link_read_dpcd( + link, + dpcd_addr + offset, + data + offset, + length); + + data += length; + length = 0; + } + + if (status != DC_OK) + return false; + } + } else { + while (length > 0) { + if (length > DEFAULT_AUX_MAX_DATA_SIZE) + cur_length = DEFAULT_AUX_MAX_DATA_SIZE; + else + cur_length = length; + + if (is_read) { + status = core_link_read_dpcd( + link, + dpcd_addr + offset, + data + offset, + cur_length); + } else { + status = core_link_write_dpcd( + link, + dpcd_addr + offset, + data + offset, + cur_length); + } + + if (status != DC_OK) + return false; + + length -= cur_length; + offset += cur_length; + } + } + return true; +} + +static bool dp_11_process_transaction( + struct dc_link *link, + struct hdcp_protection_message *message_info) +{ + return dpcd_access_helper( + link, + message_info->length, + message_info->data, + hdcp_dpcd_addrs[message_info->msg_id], + hdcp_cmd_is_read[message_info->msg_id]); +} + +static const struct protection_properties dp_11_protection = { + .supported = true, + .process_transaction = dp_11_process_transaction +}; + diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index f189307750ab..a831079607cd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -52,7 +52,9 @@ void enable_surface_flip_reporting(struct dc_plane_state *plane_state, #include "clock_source.h" #include "audio.h" #include "dm_pp_smu.h" - +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "dm_cp_psp.h" +#endif /************ link *****************/ struct link_init_data { @@ -231,6 +233,7 @@ struct resource_pool { struct dcn_fe_bandwidth { int dppclk_khz; + }; struct stream_resource { @@ -395,10 +398,6 @@ struct dc_state { struct clk_mgr *clk_mgr; - struct { - bool full_update_needed : 1; - } commit_hints; - struct kref refcount; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h index b1fab251c09b..14716ba35662 100644 --- 
a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h @@ -95,6 +95,9 @@ bool dal_ddc_service_query_ddc_data( uint8_t *read_buf, uint32_t read_size); +bool dal_ddc_submit_aux_command(struct ddc_service *ddc, + struct aux_payload *payload); + int dc_link_aux_transfer_raw(struct ddc_service *ddc, struct aux_payload *payload, enum aux_channel_operation_result *operation_result); @@ -102,6 +105,9 @@ int dc_link_aux_transfer_raw(struct ddc_service *ddc, bool dc_link_aux_transfer_with_retries(struct ddc_service *ddc, struct aux_payload *payload); +enum dc_status dc_link_aux_configure_timeout(struct ddc_service *ddc, + uint32_t timeout); + void dal_ddc_service_write_scdc_data( struct ddc_service *ddc_service, uint32_t pix_clk, diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index 08a4df2c61a8..045138dbdccb 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -28,6 +28,8 @@ #define LINK_TRAINING_ATTEMPTS 4 #define LINK_TRAINING_RETRY_DELAY 50 /* ms */ +#define LINK_AUX_DEFAULT_EXTENDED_TIMEOUT_PERIOD 32000 /*us*/ +#define LINK_AUX_DEFAULT_TIMEOUT_PERIOD 400 /*us*/ struct dc_link; struct dc_stream_state; @@ -43,6 +45,9 @@ bool dp_verify_link_cap_with_retries( struct dc_link_settings *known_limit_link_setting, int attempts); +bool dp_verify_mst_link_cap( + struct dc_link *link); + bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h index e79cd4e92919..e77b3a76766d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h @@ -140,6 +140,9 @@ struct write_command_context { struct aux_engine_funcs { + bool (*configure_timeout)( + struct ddc_service *ddc, + uint32_t timeout); void (*destroy)( struct aux_engine **ptr); bool (*acquire_engine)( diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 76f9ad1b23df..4e18e77dcf42 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -47,7 +47,7 @@ #ifdef CONFIG_DRM_AMD_DC_DCN2_1 /* Will these bw structures be ASIC specific? 
*/ -#define MAX_NUM_DPM_LVL 4 +#define MAX_NUM_DPM_LVL 8 #define WM_SET_COUNT 4 @@ -180,13 +180,19 @@ struct clk_mgr_funcs { struct dc_state *context, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); + + bool (*are_clock_states_equal) (struct dc_clocks *a, + struct dc_clocks *b); + void (*notify_wm_ranges)(struct clk_mgr *clk_mgr); }; struct clk_mgr { struct dc_context *ctx; struct clk_mgr_funcs *funcs; struct dc_clocks clks; + bool psr_allow_active_cache; int dprefclk_khz; // Used by program pixel clock in clock source funcs, need to figureout where this goes + int dentist_vco_freq_khz; #ifdef CONFIG_DRM_AMD_DC_DCN2_1 struct clk_bw_params *bw_params; #endif @@ -199,4 +205,8 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr); +void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr); + +void clk_mgr_optimize_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr); + #endif /* __DAL_CLK_MGR_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h index 7dd46eb96d67..a17a77192690 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h @@ -184,6 +184,21 @@ struct clk_mgr_registers { uint32_t MP1_SMN_C2PMSG_91; }; +enum clock_type { + clock_type_dispclk = 1, + clock_type_dcfclk, + clock_type_socclk, + clock_type_pixelclk, + clock_type_phyclk, + clock_type_dppclk, + clock_type_fclk, + clock_type_dcfdsclk, + clock_type_dscclk, + clock_type_uclk, + clock_type_dramclk, +}; + + struct state_dependent_clocks { int display_clk_khz; int pixel_clk_khz; @@ -210,8 +225,6 @@ struct clk_mgr_internal { struct state_dependent_clocks max_clks_by_state[DM_PP_CLOCKS_MAX_STATES]; /*TODO: figure out which of the below fields should be here vs in asic specific portion */ - int dentist_vco_freq_khz; - /* Cache the status of DFS-bypass feature*/ bool dfs_bypass_enabled; /* True if the DFS-bypass feature is enabled and active. 
*/ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index d8e744f366e5..05ee5295d2c1 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -38,8 +38,7 @@ struct dccg { struct dccg_funcs { void (*update_dpp_dto)(struct dccg *dccg, int dpp_inst, - int req_dppclk, - bool reduce_divider_only); + int req_dppclk); void (*get_dccg_ref_freq)(struct dccg *dccg, unsigned int xtalin_freq_inKhz, unsigned int *dccg_ref_freq_inKhz); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h index a6297219d7fc..c81a17aeaa25 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h @@ -147,6 +147,7 @@ struct hubbub_funcs { bool (*is_allow_self_refresh_enabled)(struct hubbub *hubbub); void (*allow_self_refresh_control)(struct hubbub *hubbub, bool allow); + void (*apply_DEDCN21_147_wa)(struct hubbub *hubbub); }; struct hubbub { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h index 1ddb1c6fa149..c6ff3d78b435 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h @@ -28,7 +28,11 @@ #include "dc_dsc.h" #include "dc_hw_types.h" -#include "dc_dp_types.h" +#include "dc_types.h" +/* do not include any other headers + * or else it might break Edid Utility functionality. + */ + /* Input parameters for configuring DSC from the outside of DSC */ struct dsc_config { @@ -81,12 +85,6 @@ struct dsc_enc_caps { uint32_t bpp_increment_div; /* bpp increment divisor, e.g. if 16, it's 1/16th of a bit */ }; -struct display_stream_compressor { - const struct dsc_funcs *funcs; - struct dc_context *ctx; - int inst; -}; - struct dsc_funcs { void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index abb4e4237fb6..b21909216fb6 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -184,6 +184,10 @@ struct link_encoder_funcs { bool (*fec_is_active)(struct link_encoder *enc); #endif bool (*is_in_alt_mode) (struct link_encoder *enc); + + void (*get_max_link_cap)(struct link_encoder *enc, + struct dc_link_settings *link_settings); + enum signal_type (*get_dig_mode)( struct link_encoder *enc); }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h index e8668388581b..67b610d6d91f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mem_input.h @@ -43,6 +43,7 @@ struct dcn_watermarks { #if defined(CONFIG_DRM_AMD_DC_DCN2_1) uint32_t frac_urg_bw_nom; uint32_t frac_urg_bw_flip; + int32_t urgent_latency_ns; #endif struct cstate_pstate_watermarks_st cstate_pstate; }; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h index 957e9047381a..18def2b6fafe 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h @@ -208,6 +208,7 @@ struct output_pixel_processor { struct mpc_tree mpc_tree_params; bool mpcc_disconnect_pending[MAX_PIPES]; const struct opp_funcs *funcs; + uint32_t dyn_expansion; }; enum 
fmt_stereo_action { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index fe9b7a10a1c3..6305e388612a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -214,6 +214,11 @@ struct stream_encoder_funcs { unsigned int (*dig_source_otg)( struct stream_encoder *enc); + bool (*dp_get_pixel_format)( + struct stream_encoder *enc, + enum dc_pixel_encoding *encoding, + enum dc_color_depth *depth); + #if defined(CONFIG_DRM_AMD_DC_DCN2_0) #ifdef CONFIG_DRM_AMD_DC_DSC_SUPPORT void (*enc_read_state)(struct stream_encoder *enc, struct enc_state *s); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index 6196cc32356e..27c73caf74ee 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -261,6 +261,8 @@ struct timing_generator_funcs { void (*program_manual_trigger)(struct timing_generator *optc); void (*setup_manual_trigger)(struct timing_generator *optc); + bool (*get_hw_timing)(struct timing_generator *optc, + struct dc_crtc_timing *hw_crtc_timing); void (*set_vtg_params)(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h index 3a938cd414ea..d39c1e11def5 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h @@ -114,6 +114,9 @@ struct hw_sequencer_funcs { int opp_id); #if defined(CONFIG_DRM_AMD_DC_DCN2_0) + void (*program_front_end_for_ctx)( + struct dc *dc, + struct dc_state *context); void (*program_triplebuffer)( const struct dc *dc, struct pipe_ctx *pipe_ctx, @@ -229,6 +232,13 @@ struct hw_sequencer_funcs { struct dc *dc, struct dc_state *context); + void (*exit_optimized_pwr_state)( + const struct dc *dc, + struct dc_state *context); + void (*optimize_pwr_state)( + const struct dc *dc, + struct dc_state *context); + #if defined(CONFIG_DRM_AMD_DC_DCN2_0) bool (*update_bandwidth)( struct dc *dc, @@ -321,10 +331,12 @@ struct hw_sequencer_funcs { struct dc_state *context); void (*update_writeback)(struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info); + struct dc_writeback_info *wb_info, + struct dc_state *context); void (*enable_writeback)(struct dc *dc, const struct dc_stream_status *stream_status, - struct dc_writeback_info *wb_info); + struct dc_writeback_info *wb_info, + struct dc_state *context); void (*disable_writeback)(struct dc *dc, unsigned int dwb_pipe_inst); #endif @@ -337,6 +349,9 @@ struct hw_sequencer_funcs { enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg); +#if defined(CONFIG_DRM_AMD_DC_DCN2_1) + bool (*s0i3_golden_init_wa)(struct dc *dc); +#endif }; void color_space_to_black_color( diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 18961707db23..9ad49da50a17 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -31,6 +31,8 @@ #define DP_BRANCH_DEVICE_ID_0022B9 0x0022B9 #define DP_BRANCH_DEVICE_ID_00001A 0x00001A #define DP_BRANCH_DEVICE_ID_0080E1 0x0080e1 +#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24 +#define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C enum 
ddc_result { DDC_RESULT_UNKNOWN = 0, diff --git a/drivers/gpu/drm/amd/display/include/hdcp_types.h b/drivers/gpu/drm/amd/display/include/hdcp_types.h new file mode 100644 index 000000000000..f31e6befc8d6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/include/hdcp_types.h @@ -0,0 +1,96 @@ +/* +* Copyright 2019 Advanced Micro Devices, Inc. +* +* Permission is hereby granted, free of charge, to any person obtaining a +* copy of this software and associated documentation files (the "Software"), +* to deal in the Software without restriction, including without limitation +* the rights to use, copy, modify, merge, publish, distribute, sublicense, +* and/or sell copies of the Software, and to permit persons to whom the +* Software is furnished to do so, subject to the following conditions: +* +* The above copyright notice and this permission notice shall be included in +* all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +* OTHER DEALINGS IN THE SOFTWARE. +* +* Authors: AMD +* +*/ + +#ifndef __DC_HDCP_TYPES_H__ +#define __DC_HDCP_TYPES_H__ + +enum hdcp_message_id { + HDCP_MESSAGE_ID_INVALID = -1, + + /* HDCP 1.4 */ + + HDCP_MESSAGE_ID_READ_BKSV = 0, + /* HDMI is called Ri', DP is called R0' */ + HDCP_MESSAGE_ID_READ_RI_R0, + HDCP_MESSAGE_ID_READ_PJ, + HDCP_MESSAGE_ID_WRITE_AKSV, + HDCP_MESSAGE_ID_WRITE_AINFO, + HDCP_MESSAGE_ID_WRITE_AN, + HDCP_MESSAGE_ID_READ_VH_X, + HDCP_MESSAGE_ID_READ_VH_0, + HDCP_MESSAGE_ID_READ_VH_1, + HDCP_MESSAGE_ID_READ_VH_2, + HDCP_MESSAGE_ID_READ_VH_3, + HDCP_MESSAGE_ID_READ_VH_4, + HDCP_MESSAGE_ID_READ_BCAPS, + HDCP_MESSAGE_ID_READ_BSTATUS, + HDCP_MESSAGE_ID_READ_KSV_FIFO, + HDCP_MESSAGE_ID_READ_BINFO, + + /* HDCP 2.2 */ + + HDCP_MESSAGE_ID_HDCP2VERSION, + HDCP_MESSAGE_ID_RX_CAPS, + HDCP_MESSAGE_ID_WRITE_AKE_INIT, + HDCP_MESSAGE_ID_READ_AKE_SEND_CERT, + HDCP_MESSAGE_ID_WRITE_AKE_NO_STORED_KM, + HDCP_MESSAGE_ID_WRITE_AKE_STORED_KM, + HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME, + HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO, + HDCP_MESSAGE_ID_WRITE_LC_INIT, + HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME, + HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS, + HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST, + HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK, + HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE, + HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY, + HDCP_MESSAGE_ID_READ_RXSTATUS, + HDCP_MESSAGE_ID_WRITE_CONTENT_STREAM_TYPE, + + HDCP_MESSAGE_ID_MAX +}; + +enum hdcp_version { + HDCP_Unknown = 0, + HDCP_VERSION_14, + HDCP_VERSION_22, +}; + +enum hdcp_link { + HDCP_LINK_PRIMARY, + HDCP_LINK_SECONDARY +}; + +struct hdcp_protection_message { + enum hdcp_version version; + /* relevant only for DVI */ + enum hdcp_link link; + enum hdcp_message_id msg_id; + uint32_t length; + uint8_t max_retries; + uint8_t *data; +}; + +#endif diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c index 2d8f14b69117..1de4805cb8c7 100644 --- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c +++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c @@ -373,7 +373,42 @@ static 
struct fixed31_32 translate_from_linear_space( return dc_fixpt_mul(args->arg, args->a1); } -static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg) + +static struct fixed31_32 translate_from_linear_space_long( + struct translate_from_linear_space_args *args) +{ + const struct fixed31_32 one = dc_fixpt_from_int(1); + + if (dc_fixpt_lt(one, args->arg)) + return one; + + if (dc_fixpt_le(args->arg, dc_fixpt_neg(args->a0))) + return dc_fixpt_sub( + args->a2, + dc_fixpt_mul( + dc_fixpt_add( + one, + args->a3), + dc_fixpt_pow( + dc_fixpt_neg(args->arg), + dc_fixpt_recip(args->gamma)))); + else if (dc_fixpt_le(args->a0, args->arg)) + return dc_fixpt_sub( + dc_fixpt_mul( + dc_fixpt_add( + one, + args->a3), + dc_fixpt_pow( + args->arg, + dc_fixpt_recip(args->gamma))), + args->a2); + else + return dc_fixpt_mul( + args->arg, + args->a1); +} + +static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg, bool use_eetf) { struct fixed31_32 gamma = dc_fixpt_from_fraction(22, 10); @@ -384,9 +419,13 @@ static struct fixed31_32 calculate_gamma22(struct fixed31_32 arg) scratch_gamma_args.a3 = dc_fixpt_zero; scratch_gamma_args.gamma = gamma; + if (use_eetf) + return translate_from_linear_space_long(&scratch_gamma_args); + return translate_from_linear_space(&scratch_gamma_args); } + static struct fixed31_32 translate_to_linear_space( struct fixed31_32 arg, struct fixed31_32 a0, @@ -920,11 +959,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, if (fs_params->max_display < 100) // cap at 100 at the top max_display = dc_fixpt_from_int(100); - if (fs_params->min_content < fs_params->min_display) - use_eetf = true; - else - min_content = min_display; - + // only max used, we don't adjust min luminance if (fs_params->max_content > fs_params->max_display) use_eetf = true; else @@ -950,7 +985,7 @@ static bool build_freesync_hdr(struct pwl_float_data_ex *rgb_regamma, if (dc_fixpt_lt(scaledX, dc_fixpt_zero)) output = dc_fixpt_zero; else - output = calculate_gamma22(scaledX); + output = calculate_gamma22(scaledX, use_eetf); rgb->r = output; rgb->g = output; @@ -2173,5 +2208,3 @@ bool mod_color_calculate_degamma_curve(enum dc_transfer_func_predefined trans, rgb_degamma_alloc_fail: return ret; } - - diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c index ec70c9b12e1a..16e69bbc69aa 100644 --- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c +++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c @@ -37,8 +37,8 @@ #define STATIC_SCREEN_RAMP_DELTA_REFRESH_RATE_PER_FRAME ((1000 / 60) * 65) /* Number of elements in the render times cache array */ #define RENDER_TIMES_MAX_COUNT 10 -/* Threshold to exit BTR (to avoid frequent enter-exits at the lower limit) */ -#define BTR_EXIT_MARGIN 2000 +/* Threshold to exit/exit BTR (to avoid frequent enter-exits at the lower limit) */ +#define BTR_MAX_MARGIN 2500 /* Threshold to change BTR multiplier (to avoid frequent changes) */ #define BTR_DRIFT_MARGIN 2000 /*Threshold to exit fixed refresh rate*/ @@ -234,6 +234,10 @@ static void update_v_total_for_static_ramp( current_duration_in_us) * (stream->timing.pix_clk_100hz / 10)), stream->timing.h_total), 1000); + /* v_total cannot be less than nominal */ + if (v_total < stream->timing.v_total) + v_total = stream->timing.v_total; + in_out_vrr->adjust.v_total_min = v_total; in_out_vrr->adjust.v_total_max = v_total; } @@ -250,24 +254,22 @@ static void apply_below_the_range(struct core_freesync *core_freesync, unsigned 
int delta_from_mid_point_in_us_1 = 0xFFFFFFFF; unsigned int delta_from_mid_point_in_us_2 = 0xFFFFFFFF; unsigned int frames_to_insert = 0; - unsigned int min_frame_duration_in_ns = 0; - unsigned int max_render_time_in_us = in_out_vrr->max_duration_in_us; unsigned int delta_from_mid_point_delta_in_us; - - min_frame_duration_in_ns = ((unsigned int) (div64_u64( - (1000000000ULL * 1000000), - in_out_vrr->max_refresh_in_uhz))); + unsigned int max_render_time_in_us = + in_out_vrr->max_duration_in_us - in_out_vrr->btr.margin_in_us; /* Program BTR */ - if (last_render_time_in_us + BTR_EXIT_MARGIN < max_render_time_in_us) { + if ((last_render_time_in_us + in_out_vrr->btr.margin_in_us / 2) < max_render_time_in_us) { /* Exit Below the Range */ if (in_out_vrr->btr.btr_active) { in_out_vrr->btr.frame_counter = 0; in_out_vrr->btr.btr_active = false; } - } else if (last_render_time_in_us > max_render_time_in_us) { + } else if (last_render_time_in_us > (max_render_time_in_us + in_out_vrr->btr.margin_in_us / 2)) { /* Enter Below the Range */ - in_out_vrr->btr.btr_active = true; + if (!in_out_vrr->btr.btr_active) { + in_out_vrr->btr.btr_active = true; + } } /* BTR set to "not active" so disengage */ @@ -323,7 +325,9 @@ static void apply_below_the_range(struct core_freesync *core_freesync, /* Choose number of frames to insert based on how close it * can get to the mid point of the variable range. */ - if (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2) { + if ((frame_time_in_us / mid_point_frames_ceil) > in_out_vrr->min_duration_in_us && + (delta_from_mid_point_in_us_1 < delta_from_mid_point_in_us_2 || + mid_point_frames_floor < 2)) { frames_to_insert = mid_point_frames_ceil; delta_from_mid_point_delta_in_us = delta_from_mid_point_in_us_2 - delta_from_mid_point_in_us_1; @@ -339,7 +343,7 @@ static void apply_below_the_range(struct core_freesync *core_freesync, if (in_out_vrr->btr.frames_to_insert != 0 && delta_from_mid_point_delta_in_us < BTR_DRIFT_MARGIN) { if (((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) < - in_out_vrr->max_duration_in_us) && + max_render_time_in_us) && ((last_render_time_in_us / in_out_vrr->btr.frames_to_insert) > in_out_vrr->min_duration_in_us)) frames_to_insert = in_out_vrr->btr.frames_to_insert; @@ -743,6 +747,10 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, nominal_field_rate_in_uhz = mod_freesync_calc_nominal_field_rate(stream); + /* Rounded to the nearest Hz */ + nominal_field_rate_in_uhz = 1000000ULL * + div_u64(nominal_field_rate_in_uhz + 500000, 1000000); + min_refresh_in_uhz = in_config->min_refresh_in_uhz; max_refresh_in_uhz = in_config->max_refresh_in_uhz; @@ -788,6 +796,11 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, refresh_range = in_out_vrr->max_refresh_in_uhz - in_out_vrr->min_refresh_in_uhz; + in_out_vrr->btr.margin_in_us = in_out_vrr->max_duration_in_us - + 2 * in_out_vrr->min_duration_in_us; + if (in_out_vrr->btr.margin_in_us > BTR_MAX_MARGIN) + in_out_vrr->btr.margin_in_us = BTR_MAX_MARGIN; + in_out_vrr->supported = true; } @@ -803,6 +816,7 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync, in_out_vrr->btr.inserted_duration_in_us = 0; in_out_vrr->btr.frames_to_insert = 0; in_out_vrr->btr.frame_counter = 0; + in_out_vrr->btr.mid_point_in_us = (in_out_vrr->min_duration_in_us + in_out_vrr->max_duration_in_us) / 2; @@ -975,13 +989,9 @@ void mod_freesync_get_settings(struct mod_freesync *mod_freesync, unsigned int *inserted_frames, unsigned int *inserted_duration_in_us) 
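The fixed BTR_EXIT_MARGIN is replaced by a per-stream btr.margin_in_us, computed in mod_freesync_build_vrr_params() and used above as a hysteresis band around max_render_time_in_us. A rough standalone sketch of the resulting thresholds, for an assumed 48-144 Hz refresh range (the range is an example, not from the patch):

#include <stdio.h>

#define BTR_MAX_MARGIN 2500

int main(void)
{
	/* assumed panel range: 48-144 Hz (example only) */
	unsigned int min_duration_us = 1000000 / 144;	/* ~6944 us */
	unsigned int max_duration_us = 1000000 / 48;	/* ~20833 us */
	unsigned int margin_us, max_render_us;

	margin_us = max_duration_us - 2 * min_duration_us;
	if (margin_us > BTR_MAX_MARGIN)
		margin_us = BTR_MAX_MARGIN;

	max_render_us = max_duration_us - margin_us;

	/* hysteresis band: leave BTR below it, enter BTR above it */
	printf("exit below %u us, enter above %u us\n",
	       max_render_us - margin_us / 2,
	       max_render_us + margin_us / 2);
	return 0;
}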
{ - struct core_freesync *core_freesync = NULL; - if (mod_freesync == NULL) return; - core_freesync = MOD_FREESYNC_TO_CORE(mod_freesync); - if (vrr->supported) { *v_total_min = vrr->adjust.v_total_min; *v_total_max = vrr->adjust.v_total_max; @@ -996,14 +1006,13 @@ unsigned long long mod_freesync_calc_nominal_field_rate( const struct dc_stream_state *stream) { unsigned long long nominal_field_rate_in_uhz = 0; + unsigned int total = stream->timing.h_total * stream->timing.v_total; - /* Calculate nominal field rate for stream */ + /* Calculate nominal field rate for stream, rounded up to nearest integer */ nominal_field_rate_in_uhz = stream->timing.pix_clk_100hz / 10; nominal_field_rate_in_uhz *= 1000ULL * 1000ULL * 1000ULL; - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.h_total); - nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, - stream->timing.v_total); + + nominal_field_rate_in_uhz = div_u64(nominal_field_rate_in_uhz, total); return nominal_field_rate_in_uhz; } diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/Makefile b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile new file mode 100644 index 000000000000..1c3c6d47973a --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/Makefile @@ -0,0 +1,32 @@ +# +# Copyright 2019 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# +# Makefile for the 'hdcp' sub-module of DAL. +# + +HDCP = hdcp_ddc.o hdcp_log.o hdcp_psp.o hdcp.o \ + hdcp1_execution.o hdcp1_transition.o + +AMD_DAL_HDCP = $(addprefix $(AMDDALPATH)/modules/hdcp/,$(HDCP)) +#$(info ************ DAL-HDCP_MAKEFILE ************) + +AMD_DISPLAY_FILES += $(AMD_DAL_HDCP) diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c new file mode 100644 index 000000000000..d7ac445dec6f --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c @@ -0,0 +1,426 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
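mod_freesync_calc_nominal_field_rate() above now divides by h_total * v_total in one step, and mod_freesync_build_vrr_params() rounds the result to the nearest Hz before deriving the VRR range. A worked example, assuming a standard 1080p60 timing (148.5 MHz pixel clock, 2200x1125 total):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* assumed CEA-861 1080p60 timing */
	uint32_t pix_clk_100hz = 1485000;		/* 148.5 MHz in 100 Hz units */
	uint32_t h_total = 2200, v_total = 1125;
	uint64_t rate_uhz, rounded_uhz;

	rate_uhz = (uint64_t)(pix_clk_100hz / 10) * 1000000000ULL;
	rate_uhz /= (uint64_t)h_total * v_total;	/* 60000000 uHz */

	/* round to the nearest Hz, as build_vrr_params() now does */
	rounded_uhz = 1000000ULL * ((rate_uhz + 500000) / 1000000);

	printf("%llu uHz -> %llu uHz\n",
	       (unsigned long long)rate_uhz,
	       (unsigned long long)rounded_uhz);
	return 0;
}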
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +static void push_error_status(struct mod_hdcp *hdcp, + enum mod_hdcp_status status) +{ + struct mod_hdcp_trace *trace = &hdcp->connection.trace; + + if (trace->error_count < MAX_NUM_OF_ERROR_TRACE) { + trace->errors[trace->error_count].status = status; + trace->errors[trace->error_count].state_id = hdcp->state.id; + trace->error_count++; + HDCP_ERROR_TRACE(hdcp, status); + } + + hdcp->connection.hdcp1_retry_count++; +} + +static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp) +{ + int i, display_enabled = 0; + + /* if all displays on the link are disabled, hdcp is not desired */ + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE && + !hdcp->connection.displays[i].adjust.disable) { + display_enabled = 1; + break; + } + } + + return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) && + display_enabled && !hdcp->connection.link.adjust.hdcp1.disable; +} + +static enum mod_hdcp_status execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + union mod_hdcp_transition_input *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (is_in_initialized_state(hdcp)) { + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + /* initialize transition input */ + memset(input, 0, sizeof(union mod_hdcp_transition_input)); + } else if (is_in_cp_not_desired_state(hdcp)) { + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + /* update topology event if hdcp is not desired */ + status = mod_hdcp_add_display_topology(hdcp); + } else if (is_in_hdcp1_states(hdcp)) { + status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1); + } else if (is_in_hdcp1_dp_states(hdcp)) { + status = mod_hdcp_hdcp1_dp_execution(hdcp, + event_ctx, &input->hdcp1); + } +out: + return status; +} + +static enum mod_hdcp_status transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + union mod_hdcp_transition_input *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->unexpected_event) + goto out; + + if (is_in_initialized_state(hdcp)) { + if (is_dp_hdcp(hdcp)) + if (is_cp_desired_hdcp1(hdcp)) { + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A0_DETERMINE_RX_HDCP_CAPABLE); + } else { + 
callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); + } + else if (is_hdmi_dvi_sl_hdcp(hdcp)) + if (is_cp_desired_hdcp1(hdcp)) { + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A0_WAIT_FOR_ACTIVE_RX); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); + } + else { + callback_in_ms(0, output); + set_state_id(hdcp, output, HDCP_CP_NOT_DESIRED); + } + } else if (is_in_cp_not_desired_state(hdcp)) { + increment_stay_counter(hdcp); + } else if (is_in_hdcp1_states(hdcp)) { + status = mod_hdcp_hdcp1_transition(hdcp, + event_ctx, &input->hdcp1, output); + } else if (is_in_hdcp1_dp_states(hdcp)) { + status = mod_hdcp_hdcp1_dp_transition(hdcp, + event_ctx, &input->hdcp1, output); + } else { + status = MOD_HDCP_STATUS_INVALID_STATE; + } +out: + return status; +} + +static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (is_hdcp1(hdcp)) { + if (hdcp->auth.trans_input.hdcp1.create_session != UNKNOWN) + mod_hdcp_hdcp1_destroy_session(hdcp); + + if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) { + status = mod_hdcp_remove_display_topology(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) { + output->callback_needed = 0; + output->watchdog_timer_needed = 0; + goto out; + } + } + HDCP_TOP_RESET_AUTH_TRACE(hdcp); + memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); + memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } else if (is_in_cp_not_desired_state(hdcp)) { + status = mod_hdcp_remove_display_topology(hdcp); + if (status != MOD_HDCP_STATUS_SUCCESS) { + output->callback_needed = 0; + output->watchdog_timer_needed = 0; + goto out; + } + HDCP_TOP_RESET_AUTH_TRACE(hdcp); + memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication)); + memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state)); + set_state_id(hdcp, output, HDCP_INITIALIZED); + } + +out: + /* stop callback and watchdog requests from previous authentication*/ + output->watchdog_timer_stop = 1; + output->callback_stop = 1; + return status; +} + +static enum mod_hdcp_status reset_connection(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + memset(output, 0, sizeof(struct mod_hdcp_output)); + + status = reset_authentication(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + if (current_state(hdcp) != HDCP_UNINITIALIZED) { + HDCP_TOP_RESET_CONN_TRACE(hdcp); + set_state_id(hdcp, output, HDCP_UNINITIALIZED); + } + memset(&hdcp->connection, 0, sizeof(hdcp->connection)); +out: + return status; +} + +/* + * Implementation of functions in mod_hdcp.h + */ +size_t mod_hdcp_get_memory_size(void) +{ + return sizeof(struct mod_hdcp); +} + +enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp, + struct mod_hdcp_config *config) +{ + struct mod_hdcp_output output; + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + memset(hdcp, 0, sizeof(struct mod_hdcp)); + memset(&output, 0, sizeof(output)); + hdcp->config = *config; + HDCP_TOP_INTERFACE_TRACE(hdcp); + status = reset_connection(hdcp, &output); + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + return status; +} + +enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_output output; + + HDCP_TOP_INTERFACE_TRACE(hdcp); + 
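The initial transition shown above boils down to two questions: is HDCP 1.x worth attempting right now (is_cp_desired_hdcp1()), and which variant of the state machine matches the connector. A simplified standalone model of that decision; the types, names and the attempt limit below are stand-ins, the real inputs come from struct mod_hdcp_connection:

#include <stdbool.h>
#include <stdio.h>

#define EXAMPLE_MAX_ATTEMPTS 4	/* stand-in for MAX_NUM_OF_ATTEMPTS */

enum example_state {
	EX_CP_NOT_DESIRED,
	EX_H1_A0_WAIT_FOR_ACTIVE_RX,		/* HDMI/DVI HDCP 1.x entry state */
	EX_D1_A0_DETERMINE_RX_HDCP_CAPABLE,	/* DP HDCP 1.x entry state */
};

static bool cp_desired(bool any_enabled_display, int retry_count,
		       bool hdcp1_disabled)
{
	return retry_count < EXAMPLE_MAX_ATTEMPTS &&
	       any_enabled_display && !hdcp1_disabled;
}

static enum example_state pick_initial_state(bool is_dp, bool is_hdmi_dvi,
					     bool desired)
{
	if (is_dp)
		return desired ? EX_D1_A0_DETERMINE_RX_HDCP_CAPABLE
			       : EX_CP_NOT_DESIRED;
	if (is_hdmi_dvi)
		return desired ? EX_H1_A0_WAIT_FOR_ACTIVE_RX
			       : EX_CP_NOT_DESIRED;
	return EX_CP_NOT_DESIRED;
}

int main(void)
{
	printf("%d\n", pick_initial_state(true, false,
					  cp_desired(true, 0, false)));
	return 0;
}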
memset(&output, 0, sizeof(output)); + status = reset_connection(hdcp, &output); + if (status == MOD_HDCP_STATUS_SUCCESS) + memset(hdcp, 0, sizeof(struct mod_hdcp)); + else + push_error_status(hdcp, status); + return status; +} + +enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, + struct mod_hdcp_link *link, struct mod_hdcp_display *display, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_display *display_container = NULL; + + HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, display->index); + memset(output, 0, sizeof(struct mod_hdcp_output)); + + /* skip inactive display */ + if (display->state != MOD_HDCP_DISPLAY_ACTIVE) { + status = MOD_HDCP_STATUS_SUCCESS; + goto out; + } + + /* check existing display container */ + if (get_active_display_at_index(hdcp, display->index)) { + status = MOD_HDCP_STATUS_SUCCESS; + goto out; + } + + /* find an empty display container */ + display_container = get_empty_display_container(hdcp); + if (!display_container) { + status = MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND; + goto out; + } + + /* reset existing authentication status */ + status = reset_authentication(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + /* add display to connection */ + hdcp->connection.link = *link; + *display_container = *display; + + /* reset retry counters */ + reset_retry_counts(hdcp); + + /* reset error trace */ + memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); + + /* request authentication */ + if (current_state(hdcp) != HDCP_INITIALIZED) + set_state_id(hdcp, output, HDCP_INITIALIZED); + callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, output); +out: + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + + return status; +} + +enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_display *display = NULL; + + HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, index); + memset(output, 0, sizeof(struct mod_hdcp_output)); + + /* find display in connection */ + display = get_active_display_at_index(hdcp, index); + if (!display) { + status = MOD_HDCP_STATUS_SUCCESS; + goto out; + } + + /* stop current authentication */ + status = reset_authentication(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + /* remove display */ + display->state = MOD_HDCP_DISPLAY_INACTIVE; + + /* clear retry counters */ + reset_retry_counts(hdcp); + + /* reset error trace */ + memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace)); + + /* request authentication for remaining displays*/ + if (get_active_display_count(hdcp) > 0) + callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000, + output); +out: + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + return status; +} + +enum mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_display_query *query) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_display *display = NULL; + + /* find display in connection */ + display = get_active_display_at_index(hdcp, index); + if (!display) { + status = MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + goto out; + } + + /* populate query */ + query->link = &hdcp->connection.link; + query->display = display; + query->trace = &hdcp->connection.trace; + query->encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + 
mod_hdcp_hdcp1_get_link_encryption_status(hdcp, &query->encryption_status); + +out: + return status; +} + +enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + HDCP_TOP_INTERFACE_TRACE(hdcp); + status = reset_connection(hdcp, output); + if (status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, status); + + return status; +} + +enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp, + enum mod_hdcp_event event, struct mod_hdcp_output *output) +{ + enum mod_hdcp_status exec_status, trans_status, reset_status, status; + struct mod_hdcp_event_context event_ctx; + + HDCP_EVENT_TRACE(hdcp, event); + memset(output, 0, sizeof(struct mod_hdcp_output)); + memset(&event_ctx, 0, sizeof(struct mod_hdcp_event_context)); + event_ctx.event = event; + + /* execute and transition */ + exec_status = execution(hdcp, &event_ctx, &hdcp->auth.trans_input); + trans_status = transition( + hdcp, &event_ctx, &hdcp->auth.trans_input, output); + if (trans_status == MOD_HDCP_STATUS_SUCCESS) { + status = MOD_HDCP_STATUS_SUCCESS; + } else if (exec_status == MOD_HDCP_STATUS_SUCCESS) { + status = MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE; + push_error_status(hdcp, status); + } else { + status = exec_status; + push_error_status(hdcp, status); + } + + /* reset authentication if needed */ + if (trans_status == MOD_HDCP_STATUS_RESET_NEEDED) { + HDCP_FULL_DDC_TRACE(hdcp); + reset_status = reset_authentication(hdcp, output); + if (reset_status != MOD_HDCP_STATUS_SUCCESS) + push_error_status(hdcp, reset_status); + } + return status; +} + +enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode( + enum signal_type signal) +{ + enum mod_hdcp_operation_mode mode = MOD_HDCP_MODE_OFF; + + switch (signal) { + case SIGNAL_TYPE_DVI_SINGLE_LINK: + case SIGNAL_TYPE_HDMI_TYPE_A: + mode = MOD_HDCP_MODE_DEFAULT; + break; + case SIGNAL_TYPE_EDP: + case SIGNAL_TYPE_DISPLAY_PORT: + mode = MOD_HDCP_MODE_DP; + break; + case SIGNAL_TYPE_DISPLAY_PORT_MST: + mode = MOD_HDCP_MODE_DP_MST; + break; + default: + break; + }; + + return mode; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h new file mode 100644 index 000000000000..5664bc0b5bd0 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h @@ -0,0 +1,442 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
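hdcp.c above is a pure state machine: it never blocks, and every entry point fills a struct mod_hdcp_output telling the caller which timers to arm. A rough caller-side sketch, assuming the module include path and a display-manager-style caller; this is an illustration of the calling convention, not code from this series:

#include <linux/slab.h>
#include "mod_hdcp.h"

static struct mod_hdcp *example_hdcp_bringup(struct mod_hdcp_config *config,
					     struct mod_hdcp_link *link,
					     struct mod_hdcp_display *display)
{
	struct mod_hdcp_output output;
	struct mod_hdcp *hdcp = kzalloc(mod_hdcp_get_memory_size(), GFP_KERNEL);

	if (!hdcp)
		return NULL;

	if (mod_hdcp_setup(hdcp, config) != MOD_HDCP_STATUS_SUCCESS)
		goto fail;

	if (mod_hdcp_add_display(hdcp, link, display, &output) !=
			MOD_HDCP_STATUS_SUCCESS)
		goto fail;

	/*
	 * output.callback_needed/callback_delay and
	 * output.watchdog_timer_needed/watchdog_timer_delay tell the caller
	 * which timers to (re)arm; when they expire, the caller feeds
	 * MOD_HDCP_EVENT_CALLBACK or MOD_HDCP_EVENT_WATCHDOG_TIMEOUT back
	 * through mod_hdcp_process_event() and re-reads the output.
	 */
	return hdcp;

fail:
	kfree(hdcp);
	return NULL;
}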
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef HDCP_H_ +#define HDCP_H_ + +#include "mod_hdcp.h" +#include "hdcp_log.h" + +#define BCAPS_READY_MASK 0x20 +#define BCAPS_REPEATER_MASK 0x40 +#define BSTATUS_DEVICE_COUNT_MASK 0X007F +#define BSTATUS_MAX_DEVS_EXCEEDED_MASK 0x0080 +#define BSTATUS_MAX_CASCADE_EXCEEDED_MASK 0x0800 +#define BCAPS_HDCP_CAPABLE_MASK_DP 0x01 +#define BCAPS_REPEATER_MASK_DP 0x02 +#define BSTATUS_READY_MASK_DP 0x01 +#define BSTATUS_R0_P_AVAILABLE_MASK_DP 0x02 +#define BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x04 +#define BSTATUS_REAUTH_REQUEST_MASK_DP 0x08 +#define BINFO_DEVICE_COUNT_MASK_DP 0X007F +#define BINFO_MAX_DEVS_EXCEEDED_MASK_DP 0x0080 +#define BINFO_MAX_CASCADE_EXCEEDED_MASK_DP 0x0800 + +#define RXSTATUS_MSG_SIZE_MASK 0x03FF +#define RXSTATUS_READY_MASK 0x0400 +#define RXSTATUS_REAUTH_REQUEST_MASK 0x0800 +#define RXIDLIST_DEVICE_COUNT_LOWER_MASK 0xf0 +#define RXIDLIST_DEVICE_COUNT_UPPER_MASK 0x01 +#define RXCAPS_BYTE0_HDCP_CAPABLE_MASK_DP 0x02 +#define RXSTATUS_READY_MASK_DP 0x0001 +#define RXSTATUS_H_P_AVAILABLE_MASK_DP 0x0002 +#define RXSTATUS_PAIRING_AVAILABLE_MASK_DP 0x0004 +#define RXSTATUS_REAUTH_REQUEST_MASK_DP 0x0008 +#define RXSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP 0x0010 + +enum mod_hdcp_trans_input_result { + UNKNOWN = 0, + PASS, + FAIL +}; + +struct mod_hdcp_transition_input_hdcp1 { + uint8_t bksv_read; + uint8_t bksv_validation; + uint8_t add_topology; + uint8_t create_session; + uint8_t an_write; + uint8_t aksv_write; + uint8_t ainfo_write; + uint8_t bcaps_read; + uint8_t r0p_read; + uint8_t rx_validation; + uint8_t encryption; + uint8_t link_maintenance; + uint8_t ready_check; + uint8_t bstatus_read; + uint8_t max_cascade_check; + uint8_t max_devs_check; + uint8_t device_count_check; + uint8_t ksvlist_read; + uint8_t vp_read; + uint8_t ksvlist_vp_validation; + + uint8_t hdcp_capable_dp; + uint8_t binfo_read_dp; + uint8_t r0p_available_dp; + uint8_t link_integiry_check; + uint8_t reauth_request_check; + uint8_t stream_encryption_dp; +}; + +union mod_hdcp_transition_input { + struct mod_hdcp_transition_input_hdcp1 hdcp1; +}; + +struct mod_hdcp_message_hdcp1 { + uint8_t an[8]; + uint8_t aksv[5]; + uint8_t ainfo; + uint8_t bksv[5]; + uint16_t r0p; + uint8_t bcaps; + uint16_t bstatus; + uint8_t ksvlist[635]; + uint16_t ksvlist_size; + uint8_t vp[20]; + + uint16_t binfo_dp; +}; + +union mod_hdcp_message { + struct mod_hdcp_message_hdcp1 hdcp1; +}; + +struct mod_hdcp_auth_counters { + uint8_t stream_management_retry_count; +}; + +/* contains values per connection */ +struct mod_hdcp_connection { + struct mod_hdcp_link link; + struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS]; + uint8_t is_repeater; + uint8_t is_km_stored; + struct mod_hdcp_trace trace; + uint8_t hdcp1_retry_count; +}; + +/* contains values per authentication cycle */ +struct mod_hdcp_authentication { + uint32_t id; + union mod_hdcp_message msg; + union mod_hdcp_transition_input trans_input; + struct mod_hdcp_auth_counters count; +}; + +/* contains values per state change */ +struct mod_hdcp_state { + uint8_t id; + uint32_t stay_count; +}; + +/* per event in a state */ +struct mod_hdcp_event_context { + enum mod_hdcp_event event; + uint8_t rx_id_list_ready; + uint8_t unexpected_event; +}; + +struct mod_hdcp { + /* 
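The HDMI Bstatus layout encoded by the masks above packs the downstream device count into bits 6:0, with MAX_DEVS_EXCEEDED and MAX_CASCADE_EXCEEDED as separate error flags. A standalone illustration (0x0883 is an arbitrary example value):

#include <stdint.h>
#include <stdio.h>

#define BSTATUS_DEVICE_COUNT_MASK		0x007F
#define BSTATUS_MAX_DEVS_EXCEEDED_MASK		0x0080
#define BSTATUS_MAX_CASCADE_EXCEEDED_MASK	0x0800

int main(void)
{
	uint16_t bstatus = 0x0883;	/* example: 3 devices, both errors set */

	printf("devices=%d max_devs=%d max_cascade=%d\n",
	       bstatus & BSTATUS_DEVICE_COUNT_MASK,
	       !!(bstatus & BSTATUS_MAX_DEVS_EXCEEDED_MASK),
	       !!(bstatus & BSTATUS_MAX_CASCADE_EXCEEDED_MASK));
	return 0;
}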
per link */ + struct mod_hdcp_config config; + /* per connection */ + struct mod_hdcp_connection connection; + /* per authentication attempt */ + struct mod_hdcp_authentication auth; + /* per state in an authentication */ + struct mod_hdcp_state state; + /* reserved memory buffer */ + uint8_t buf[2025]; +}; + +enum mod_hdcp_initial_state_id { + HDCP_UNINITIALIZED = 0x0, + HDCP_INITIAL_STATE_START = HDCP_UNINITIALIZED, + HDCP_INITIALIZED, + HDCP_CP_NOT_DESIRED, + HDCP_INITIAL_STATE_END = HDCP_CP_NOT_DESIRED +}; + +enum mod_hdcp_hdcp1_state_id { + HDCP1_STATE_START = HDCP_INITIAL_STATE_END, + H1_A0_WAIT_FOR_ACTIVE_RX, + H1_A1_EXCHANGE_KSVS, + H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER, + H1_A45_AUTHENTICATED, + H1_A8_WAIT_FOR_READY, + H1_A9_READ_KSV_LIST, + HDCP1_STATE_END = H1_A9_READ_KSV_LIST +}; + +enum mod_hdcp_hdcp1_dp_state_id { + HDCP1_DP_STATE_START = HDCP1_STATE_END, + D1_A0_DETERMINE_RX_HDCP_CAPABLE, + D1_A1_EXCHANGE_KSVS, + D1_A23_WAIT_FOR_R0_PRIME, + D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER, + D1_A4_AUTHENTICATED, + D1_A6_WAIT_FOR_READY, + D1_A7_READ_KSV_LIST, + HDCP1_DP_STATE_END = D1_A7_READ_KSV_LIST, +}; + +/* hdcp1 executions and transitions */ +typedef enum mod_hdcp_status (*mod_hdcp_action)(struct mod_hdcp *hdcp); +uint8_t mod_hdcp_execute_and_set( + mod_hdcp_action func, uint8_t *flag, + enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str); +enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input); +enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input); +enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output); +enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output); + +/* log functions */ +void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, + uint8_t *buf, uint32_t buf_size); +/* TODO: add adjustment log */ + +/* psp functions */ +enum mod_hdcp_status mod_hdcp_add_display_topology( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_remove_display_topology( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption( + struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp, + enum mod_hdcp_encryption_status *encryption_status); +/* ddc functions */ +enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp); +enum 
mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_rxcaps(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_rxstatus(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_ake_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_no_stored_km(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_stored_km(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_lc_init(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_eks(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_repeater_auth_ack(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_stream_manage(struct mod_hdcp *hdcp); +enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp); + +/* hdcp version helpers */ +static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp) +{ + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP || + hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); +} + +static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp) +{ + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST); +} + +static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp) +{ + return (hdcp->connection.link.mode == MOD_HDCP_MODE_DEFAULT); +} + +/* hdcp state helpers */ +static inline uint8_t current_state(struct mod_hdcp *hdcp) +{ + return hdcp->state.id; +} + +static inline void set_state_id(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output, uint8_t id) +{ + memset(&hdcp->state, 0, sizeof(hdcp->state)); + hdcp->state.id = id; + /* callback timer should be reset per state */ + output->callback_stop = 1; + output->watchdog_timer_stop = 1; + HDCP_NEXT_STATE_TRACE(hdcp, id, output); +} + +static inline uint8_t is_in_hdcp1_states(struct mod_hdcp *hdcp) +{ + return (current_state(hdcp) > HDCP1_STATE_START && + current_state(hdcp) <= HDCP1_STATE_END); +} + +static inline uint8_t is_in_hdcp1_dp_states(struct mod_hdcp *hdcp) +{ + return (current_state(hdcp) > HDCP1_DP_STATE_START && + current_state(hdcp) <= HDCP1_DP_STATE_END); +} + +static inline uint8_t is_hdcp1(struct mod_hdcp *hdcp) +{ + return (is_in_hdcp1_states(hdcp) || is_in_hdcp1_dp_states(hdcp)); +} + +static inline uint8_t is_in_cp_not_desired_state(struct mod_hdcp *hdcp) +{ + return current_state(hdcp) == HDCP_CP_NOT_DESIRED; +} + +static inline uint8_t is_in_initialized_state(struct mod_hdcp *hdcp) +{ + return current_state(hdcp) == HDCP_INITIALIZED; +} + +/* transition operation helpers */ +static inline void increment_stay_counter(struct mod_hdcp *hdcp) +{ + hdcp->state.stay_count++; +} + +static inline void fail_and_restart_in_ms(uint16_t time, + enum mod_hdcp_status *status, + struct mod_hdcp_output *output) +{ + output->callback_needed = 1; + output->callback_delay = time; + output->watchdog_timer_needed = 0; + output->watchdog_timer_delay = 0; + *status = 
MOD_HDCP_STATUS_RESET_NEEDED; +} + +static inline void callback_in_ms(uint16_t time, struct mod_hdcp_output *output) +{ + output->callback_needed = 1; + output->callback_delay = time; +} + +static inline void set_watchdog_in_ms(struct mod_hdcp *hdcp, uint16_t time, + struct mod_hdcp_output *output) +{ + output->watchdog_timer_needed = 1; + output->watchdog_timer_delay = time; +} + +/* connection topology helpers */ +static inline uint8_t is_display_active(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ACTIVE; +} + +static inline uint8_t is_display_added(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; +} + +static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display) +{ + return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; +} + +static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp) +{ + uint8_t added_count = 0; + uint8_t i; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_active(&hdcp->connection.displays[i])) + added_count++; + return added_count; +} + +static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp) +{ + uint8_t added_count = 0; + uint8_t i; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_added(&hdcp->connection.displays[i])) + added_count++; + return added_count; +} + +static inline struct mod_hdcp_display *get_first_added_display( + struct mod_hdcp *hdcp) +{ + uint8_t i; + struct mod_hdcp_display *display = NULL; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (is_display_added(&hdcp->connection.displays[i])) { + display = &hdcp->connection.displays[i]; + break; + } + return display; +} + +static inline struct mod_hdcp_display *get_active_display_at_index( + struct mod_hdcp *hdcp, uint8_t index) +{ + uint8_t i; + struct mod_hdcp_display *display = NULL; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (hdcp->connection.displays[i].index == index && + is_display_active(&hdcp->connection.displays[i])) { + display = &hdcp->connection.displays[i]; + break; + } + return display; +} + +static inline struct mod_hdcp_display *get_empty_display_container( + struct mod_hdcp *hdcp) +{ + uint8_t i; + struct mod_hdcp_display *display = NULL; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) + if (!is_display_active(&hdcp->connection.displays[i])) { + display = &hdcp->connection.displays[i]; + break; + } + return display; +} + +static inline void reset_retry_counts(struct mod_hdcp *hdcp) +{ + hdcp->connection.hdcp1_retry_count = 0; +} + +#endif /* HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c new file mode 100644 index 000000000000..3db4a7da414f --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c @@ -0,0 +1,531 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
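The state ids declared earlier in this header are laid out as contiguous, chained ranges (each family's START aliases the previous family's END), which is what lets helpers such as is_in_hdcp1_states() answer "which state machine am I in?" with two comparisons. A standalone copy of the first few ids, showing the resulting values:

#include <stdio.h>

enum {
	HDCP_UNINITIALIZED = 0x0,
	HDCP_INITIAL_STATE_START = HDCP_UNINITIALIZED,
	HDCP_INITIALIZED,
	HDCP_CP_NOT_DESIRED,
	HDCP_INITIAL_STATE_END = HDCP_CP_NOT_DESIRED,

	HDCP1_STATE_START = HDCP_INITIAL_STATE_END,	/* aliases the value 2 */
	H1_A0_WAIT_FOR_ACTIVE_RX,
	H1_A1_EXCHANGE_KSVS,
	/* ... remaining HDCP1 and HDCP1-DP states continue the sequence */
};

int main(void)
{
	/*
	 * prints "2 2 3": the range boundary shares a value with the previous
	 * family, which is why the range checks use a strict '>' on START
	 */
	printf("%d %d %d\n", HDCP_CP_NOT_DESIRED, HDCP1_STATE_START,
	       H1_A0_WAIT_FOR_ACTIVE_RX);
	return 0;
}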
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +static inline enum mod_hdcp_status validate_bksv(struct mod_hdcp *hdcp) +{ + uint64_t n = *(uint64_t *)hdcp->auth.msg.hdcp1.bksv; + uint8_t count = 0; + + while (n) { + count++; + n &= (n - 1); + } + return (count == 20) ? MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_INVALID_BKSV; +} + +static inline enum mod_hdcp_status check_ksv_ready(struct mod_hdcp *hdcp) +{ + if (is_dp_hdcp(hdcp)) + return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_READY_MASK_DP) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY; + return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_READY_MASK) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY; +} + +static inline enum mod_hdcp_status check_hdcp_capable_dp(struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp1.bcaps & BCAPS_HDCP_CAPABLE_MASK_DP) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE; +} + +static inline enum mod_hdcp_status check_r0p_available_dp(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + if (is_dp_hdcp(hdcp)) { + status = (hdcp->auth.msg.hdcp1.bstatus & + BSTATUS_R0_P_AVAILABLE_MASK_DP) ? + MOD_HDCP_STATUS_SUCCESS : + MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING; + } else { + status = MOD_HDCP_STATUS_INVALID_OPERATION; + } + return status; +} + +static inline enum mod_hdcp_status check_link_integrity_dp( + struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp1.bstatus & + BSTATUS_LINK_INTEGRITY_FAILURE_MASK_DP) ? + MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE : + MOD_HDCP_STATUS_SUCCESS; +} + +static inline enum mod_hdcp_status check_no_reauthentication_request_dp( + struct mod_hdcp *hdcp) +{ + return (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_REAUTH_REQUEST_MASK_DP) ? + MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED : + MOD_HDCP_STATUS_SUCCESS; +} + +static inline enum mod_hdcp_status check_no_max_cascade(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = (hdcp->auth.msg.hdcp1.binfo_dp & + BINFO_MAX_CASCADE_EXCEEDED_MASK_DP) ? + MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + else + status = (hdcp->auth.msg.hdcp1.bstatus & + BSTATUS_MAX_CASCADE_EXCEEDED_MASK) ? + MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + return status; +} + +static inline enum mod_hdcp_status check_no_max_devs(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = (hdcp->auth.msg.hdcp1.binfo_dp & + BINFO_MAX_DEVS_EXCEEDED_MASK_DP) ? + MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + else + status = (hdcp->auth.msg.hdcp1.bstatus & + BSTATUS_MAX_DEVS_EXCEEDED_MASK) ? + MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE : + MOD_HDCP_STATUS_SUCCESS; + return status; +} + +static inline uint8_t get_device_count(struct mod_hdcp *hdcp) +{ + return is_dp_hdcp(hdcp) ? 
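validate_bksv() above implements the HDCP 1.x rule that a BKSV is 40 bits containing exactly twenty 1s and twenty 0s, using a Kernighan-style popcount. A standalone equivalent over the 5-byte buffer; the example value is arbitrary, not a real KSV:

#include <stdint.h>
#include <stdio.h>

static int bksv_is_valid(const uint8_t bksv[5])
{
	uint64_t n = 0;
	int i, ones = 0;

	for (i = 0; i < 5; i++)
		n |= (uint64_t)bksv[i] << (8 * i);

	while (n) {			/* clears one set bit per iteration */
		ones++;
		n &= n - 1;
	}
	return ones == 20;
}

int main(void)
{
	const uint8_t example[5] = { 0xff, 0xff, 0x0f, 0x00, 0x00 };

	printf("%d\n", bksv_is_valid(example));	/* 1: exactly 20 bits set */
	return 0;
}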
+ (hdcp->auth.msg.hdcp1.binfo_dp & BINFO_DEVICE_COUNT_MASK_DP) : + (hdcp->auth.msg.hdcp1.bstatus & BSTATUS_DEVICE_COUNT_MASK); +} + +static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp) +{ + /* device count must be greater than or equal to tracked hdcp displays */ + return (get_device_count(hdcp) < get_added_display_count(hdcp)) ? + MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE : + MOD_HDCP_STATUS_SUCCESS; +} + +static enum mod_hdcp_status wait_for_active_rx(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv, + &input->bksv_read, &status, + hdcp, "bksv_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps, + &input->bcaps_read, &status, + hdcp, "bcaps_read")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology, + &input->add_topology, &status, + hdcp, "add_topology")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session, + &input->create_session, &status, + hdcp, "create_session")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_an, + &input->an_write, &status, + hdcp, "an_write")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_write_aksv, + &input->aksv_write, &status, + hdcp, "aksv_write")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bksv, + &input->bksv_read, &status, + hdcp, "bksv_read")) + goto out; + if (!mod_hdcp_execute_and_set(validate_bksv, + &input->bksv_validation, &status, + hdcp, "bksv_validation")) + goto out; + if (hdcp->auth.msg.hdcp1.ainfo) { + if (!mod_hdcp_execute_and_set(mod_hdcp_write_ainfo, + &input->ainfo_write, &status, + hdcp, "ainfo_write")) + goto out; + } +out: + return status; +} + +static enum mod_hdcp_status computations_validate_rx_test_for_repeater( + struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_r0p, + &input->r0p_read, &status, + hdcp, "r0p_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_rx, + &input->rx_validation, &status, + hdcp, "rx_validation")) + goto out; + if (hdcp->connection.is_repeater) { + if (!hdcp->connection.link.adjust.hdcp1.postpone_encryption) + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp1_enable_encryption, + &input->encryption, &status, + hdcp, "encryption")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption, + &input->encryption, &status, + hdcp, "encryption")) + goto out; + if (is_dp_mst_hdcp(hdcp)) + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp1_enable_dp_stream_encryption, + &input->stream_encryption_dp, &status, + hdcp, "stream_encryption_dp")) + goto out; + } +out: + return status; +} + +static enum mod_hdcp_status 
authenticated(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_link_maintenance, + &input->link_maintenance, &status, + hdcp, "link_maintenance")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status wait_for_ready(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK && + event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (is_dp_hdcp(hdcp)) { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_link_integrity_dp, + &input->link_integiry_check, &status, + hdcp, "link_integiry_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, + &input->reauth_request_check, &status, + hdcp, "reauth_request_check")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps, + &input->bcaps_read, &status, + hdcp, "bcaps_read")) + goto out; + } + if (!mod_hdcp_execute_and_set(check_ksv_ready, + &input->ready_check, &status, + hdcp, "ready_check")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status read_ksv_list(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + uint8_t device_count; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (is_dp_hdcp(hdcp)) { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_binfo, + &input->binfo_read_dp, &status, + hdcp, "binfo_read_dp")) + goto out; + } else { + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + } + if (!mod_hdcp_execute_and_set(check_no_max_cascade, + &input->max_cascade_check, &status, + hdcp, "max_cascade_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_no_max_devs, + &input->max_devs_check, &status, + hdcp, "max_devs_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_device_count, + &input->device_count_check, &status, + hdcp, "device_count_check")) + goto out; + device_count = get_device_count(hdcp); + hdcp->auth.msg.hdcp1.ksvlist_size = device_count*5; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_ksvlist, + &input->ksvlist_read, &status, + hdcp, "ksvlist_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_read_vp, + &input->vp_read, &status, + hdcp, "vp_read")) + goto out; + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_validate_ksvlist_vp, + &input->ksvlist_vp_validation, &status, + hdcp, "ksvlist_vp_validation")) + goto out; + if (input->encryption != PASS) + if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_enable_encryption, + &input->encryption, &status, + hdcp, "encryption")) + goto out; + if (is_dp_mst_hdcp(hdcp)) + if (!mod_hdcp_execute_and_set( + mod_hdcp_hdcp1_enable_dp_stream_encryption, + &input->stream_encryption_dp, &status, + hdcp, "stream_encryption_dp")) + goto out; +out: + return status; +} + +static enum 
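read_ksv_list() above sizes the KSV FIFO read as device_count * 5 because each KSV is 40 bits; the 635-byte ksvlist buffer in struct mod_hdcp_message_hdcp1 is exactly the 7-bit maximum of 127 downstream devices times 5:

#include <stdio.h>

int main(void)
{
	unsigned int device_count = 127;	/* 7-bit BINFO/BSTATUS maximum */
	unsigned int ksvlist_size = device_count * 5;

	printf("%u\n", ksvlist_size);		/* 635, the buffer size above */
	return 0;
}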
mod_hdcp_status determine_rx_hdcp_capable_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CALLBACK) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bcaps, + &input->bcaps_read, &status, + hdcp, "bcaps_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_hdcp_capable_dp, + &input->hdcp_capable_dp, &status, + hdcp, "hdcp_capable_dp")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status wait_for_r0_prime_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ && + event_ctx->event != MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_r0p_available_dp, + &input->r0p_available_dp, &status, + hdcp, "r0p_available_dp")) + goto out; +out: + return status; +} + +static enum mod_hdcp_status authenticated_dp(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + if (event_ctx->event != MOD_HDCP_EVENT_CPIRQ) { + event_ctx->unexpected_event = 1; + goto out; + } + + if (!mod_hdcp_execute_and_set(mod_hdcp_read_bstatus, + &input->bstatus_read, &status, + hdcp, "bstatus_read")) + goto out; + if (!mod_hdcp_execute_and_set(check_link_integrity_dp, + &input->link_integiry_check, &status, + hdcp, "link_integiry_check")) + goto out; + if (!mod_hdcp_execute_and_set(check_no_reauthentication_request_dp, + &input->reauth_request_check, &status, + hdcp, "reauth_request_check")) + goto out; +out: + return status; +} + +uint8_t mod_hdcp_execute_and_set( + mod_hdcp_action func, uint8_t *flag, + enum mod_hdcp_status *status, struct mod_hdcp *hdcp, char *str) +{ + *status = func(hdcp); + if (*status == MOD_HDCP_STATUS_SUCCESS && *flag != PASS) { + HDCP_INPUT_PASS_TRACE(hdcp, str); + *flag = PASS; + } else if (*status != MOD_HDCP_STATUS_SUCCESS && *flag != FAIL) { + HDCP_INPUT_FAIL_TRACE(hdcp, str); + *flag = FAIL; + } + return (*status == MOD_HDCP_STATUS_SUCCESS); +} + +enum mod_hdcp_status mod_hdcp_hdcp1_execution(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + switch (current_state(hdcp)) { + case H1_A0_WAIT_FOR_ACTIVE_RX: + status = wait_for_active_rx(hdcp, event_ctx, input); + break; + case H1_A1_EXCHANGE_KSVS: + status = exchange_ksvs(hdcp, event_ctx, input); + break; + case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: + status = computations_validate_rx_test_for_repeater(hdcp, + event_ctx, input); + break; + case H1_A45_AUTHENTICATED: + status = authenticated(hdcp, event_ctx, input); + break; + case H1_A8_WAIT_FOR_READY: + status = wait_for_ready(hdcp, event_ctx, input); + break; + case H1_A9_READ_KSV_LIST: + status = read_ksv_list(hdcp, event_ctx, input); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + break; + } + + return status; +} + +extern enum mod_hdcp_status mod_hdcp_hdcp1_dp_execution(struct mod_hdcp *hdcp, + struct 
mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + + switch (current_state(hdcp)) { + case D1_A0_DETERMINE_RX_HDCP_CAPABLE: + status = determine_rx_hdcp_capable_dp(hdcp, event_ctx, input); + break; + case D1_A1_EXCHANGE_KSVS: + status = exchange_ksvs(hdcp, event_ctx, input); + break; + case D1_A23_WAIT_FOR_R0_PRIME: + status = wait_for_r0_prime_dp(hdcp, event_ctx, input); + break; + case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER: + status = computations_validate_rx_test_for_repeater( + hdcp, event_ctx, input); + break; + case D1_A4_AUTHENTICATED: + status = authenticated_dp(hdcp, event_ctx, input); + break; + case D1_A6_WAIT_FOR_READY: + status = wait_for_ready(hdcp, event_ctx, input); + break; + case D1_A7_READ_KSV_LIST: + status = read_ksv_list(hdcp, event_ctx, input); + break; + default: + status = MOD_HDCP_STATUS_INVALID_STATE; + break; + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c new file mode 100644 index 000000000000..136b8011ff3f --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c @@ -0,0 +1,307 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
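Every step in the execution functions of hdcp1_execution.c goes through mod_hdcp_execute_and_set(), which runs one action, latches its PASS/FAIL result into the per-attempt transition input (tracing only on the first change), and tells the caller whether to continue. A simplified standalone model of that idiom; the types and the trace calls below are stand-ins:

#include <stdio.h>

enum ex_status { EX_SUCCESS = 0, EX_FAILURE };
enum ex_flag { EX_UNKNOWN = 0, EX_PASS, EX_FAIL };

typedef enum ex_status (*ex_action)(void);

static int execute_and_set(ex_action func, enum ex_flag *flag,
			   enum ex_status *status, const char *name)
{
	*status = func();
	if (*status == EX_SUCCESS && *flag != EX_PASS) {
		printf("%s: pass\n", name);	/* HDCP_INPUT_PASS_TRACE stand-in */
		*flag = EX_PASS;
	} else if (*status != EX_SUCCESS && *flag != EX_FAIL) {
		printf("%s: fail\n", name);	/* HDCP_INPUT_FAIL_TRACE stand-in */
		*flag = EX_FAIL;
	}
	return *status == EX_SUCCESS;
}

static enum ex_status read_bcaps_stub(void) { return EX_SUCCESS; }
static enum ex_status read_bksv_stub(void)  { return EX_FAILURE; }

int main(void)
{
	enum ex_status status = EX_SUCCESS;
	enum ex_flag bcaps_read = EX_UNKNOWN, bksv_read = EX_UNKNOWN;

	/* bail out at the first failing step, as the execution functions do */
	if (!execute_and_set(read_bcaps_stub, &bcaps_read, &status, "bcaps_read"))
		goto out;
	if (!execute_and_set(read_bksv_stub, &bksv_read, &status, "bksv_read"))
		goto out;
out:
	return status;
}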
+ * + * Authors: AMD + * + */ + +#include "hdcp.h" + +enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_connection *conn = &hdcp->connection; + struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + + switch (current_state(hdcp)) { + case H1_A0_WAIT_FOR_ACTIVE_RX: + if (input->bksv_read != PASS || input->bcaps_read != PASS) { + /* 1A-04: repeatedly attempts on port access failure */ + callback_in_ms(500, output); + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS); + break; + case H1_A1_EXCHANGE_KSVS: + if (input->add_topology != PASS || + input->create_session != PASS) { + /* out of sync with psp state */ + adjust->hdcp1.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->an_write != PASS || + input->aksv_write != PASS || + input->bksv_read != PASS || + input->bksv_validation != PASS || + input->ainfo_write == FAIL) { + /* 1A-05: consider invalid bksv a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(300, output); + set_state_id(hdcp, output, + H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER); + break; + case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: + if (input->bcaps_read != PASS || + input->r0p_read != PASS || + input->rx_validation != PASS || + (!conn->is_repeater && input->encryption != PASS)) { + /* 1A-06: consider invalid r0' a failure */ + /* 1A-08: consider bksv listed in SRM a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_repeater) { + callback_in_ms(0, output); + set_watchdog_in_ms(hdcp, 5000, output); + set_state_id(hdcp, output, H1_A8_WAIT_FOR_READY); + } else { + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A45_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + } + break; + case H1_A45_AUTHENTICATED: + if (input->link_maintenance != PASS) { + /* 1A-07: consider invalid ri' a failure */ + /* 1A-07a: consider read ri' not returned a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(500, output); + increment_stay_counter(hdcp); + break; + case H1_A8_WAIT_FOR_READY: + if (input->ready_check != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1B-03: fail hdcp on ksv list READY timeout */ + /* prevent black screen in next attempt */ + adjust->hdcp1.postpone_encryption = 1; + fail_and_restart_in_ms(0, &status, output); + } else { + /* continue ksv list READY polling*/ + callback_in_ms(500, output); + increment_stay_counter(hdcp); + } + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A9_READ_KSV_LIST); + break; + case H1_A9_READ_KSV_LIST: + if (input->bstatus_read != PASS || + input->max_cascade_check != PASS || + input->max_devs_check != PASS || + input->device_count_check != PASS || + input->ksvlist_read != PASS || + input->vp_read != PASS || + input->ksvlist_vp_validation != PASS || + input->encryption != PASS) { + /* 1B-06: consider MAX_CASCADE_EXCEEDED a failure */ + /* 1B-05: consider MAX_DEVS_EXCEEDED a failure */ + /* 1B-04: consider invalid v' a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, H1_A45_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + break; + 
default: + status = MOD_HDCP_STATUS_INVALID_STATE; + fail_and_restart_in_ms(0, &status, output); + break; + } + + return status; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp, + struct mod_hdcp_event_context *event_ctx, + struct mod_hdcp_transition_input_hdcp1 *input, + struct mod_hdcp_output *output) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS; + struct mod_hdcp_connection *conn = &hdcp->connection; + struct mod_hdcp_link_adjustment *adjust = &hdcp->connection.link.adjust; + + switch (current_state(hdcp)) { + case D1_A0_DETERMINE_RX_HDCP_CAPABLE: + if (input->bcaps_read != PASS) { + /* 1A-04: no authentication on bcaps read failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->hdcp_capable_dp != PASS) { + adjust->hdcp1.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS); + break; + case D1_A1_EXCHANGE_KSVS: + if (input->add_topology != PASS || + input->create_session != PASS) { + /* out of sync with psp state */ + adjust->hdcp1.disable = 1; + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->an_write != PASS || + input->aksv_write != PASS || + input->bksv_read != PASS || + input->bksv_validation != PASS || + input->ainfo_write == FAIL) { + /* 1A-05: consider invalid bksv a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + set_watchdog_in_ms(hdcp, 100, output); + set_state_id(hdcp, output, D1_A23_WAIT_FOR_R0_PRIME); + break; + case D1_A23_WAIT_FOR_R0_PRIME: + if (input->bstatus_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->r0p_available_dp != PASS) { + if (event_ctx->event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) + fail_and_restart_in_ms(0, &status, output); + else + increment_stay_counter(hdcp); + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER); + break; + case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER: + if (input->r0p_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->rx_validation != PASS) { + if (hdcp->state.stay_count < 2) { + /* allow 2 additional retries */ + callback_in_ms(0, output); + increment_stay_counter(hdcp); + } else { + /* + * 1A-06: consider invalid r0' a failure + * after 3 attempts. 
+ * 1A-08: consider bksv listed in SRM a failure + */ + fail_and_restart_in_ms(0, &status, output); + } + break; + } else if ((!conn->is_repeater && input->encryption != PASS) || + (!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { + fail_and_restart_in_ms(0, &status, output); + break; + } + if (conn->is_repeater) { + set_watchdog_in_ms(hdcp, 5000, output); + set_state_id(hdcp, output, D1_A6_WAIT_FOR_READY); + } else { + set_state_id(hdcp, output, D1_A4_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + } + break; + case D1_A4_AUTHENTICATED: + if (input->link_integiry_check != PASS || + input->reauth_request_check != PASS) { + /* 1A-07: restart hdcp on a link integrity failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } + break; + case D1_A6_WAIT_FOR_READY: + if (input->link_integiry_check == FAIL || + input->reauth_request_check == FAIL) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->ready_check != PASS) { + if (event_ctx->event == + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) { + /* 1B-04: fail hdcp on ksv list READY timeout */ + /* prevent black screen in next attempt */ + adjust->hdcp1.postpone_encryption = 1; + fail_and_restart_in_ms(0, &status, output); + } else { + increment_stay_counter(hdcp); + } + break; + } + callback_in_ms(0, output); + set_state_id(hdcp, output, D1_A7_READ_KSV_LIST); + break; + case D1_A7_READ_KSV_LIST: + if (input->binfo_read_dp != PASS || + input->max_cascade_check != PASS || + input->max_devs_check != PASS) { + /* 1B-06: consider MAX_DEVS_EXCEEDED a failure */ + /* 1B-07: consider MAX_CASCADE_EXCEEDED a failure */ + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->device_count_check != PASS) { + /* + * some slow dongle doesn't update + * device count as soon as downstream is connected. + * give it more time to react. + */ + adjust->hdcp1.postpone_encryption = 1; + fail_and_restart_in_ms(1000, &status, output); + break; + } else if (input->ksvlist_read != PASS || + input->vp_read != PASS) { + fail_and_restart_in_ms(0, &status, output); + break; + } else if (input->ksvlist_vp_validation != PASS) { + if (hdcp->state.stay_count < 2) { + /* allow 2 additional retries */ + callback_in_ms(0, output); + increment_stay_counter(hdcp); + } else { + /* + * 1B-05: consider invalid v' a failure + * after 3 attempts. + */ + fail_and_restart_in_ms(0, &status, output); + } + break; + } else if (input->encryption != PASS || + (is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) { + fail_and_restart_in_ms(0, &status, output); + break; + } + set_state_id(hdcp, output, D1_A4_AUTHENTICATED); + HDCP_FULL_DDC_TRACE(hdcp); + break; + default: + fail_and_restart_in_ms(0, &status, output); + break; + } + + return status; +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c new file mode 100644 index 000000000000..e7baae059b85 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c @@ -0,0 +1,305 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "hdcp.h" + +#define MIN(a, b) ((a) < (b) ? (a) : (b)) +#define HDCP_I2C_ADDR 0x3a /* 0x74 >> 1*/ +#define KSV_READ_SIZE 0xf /* 0x6803b - 0x6802c */ +#define HDCP_MAX_AUX_TRANSACTION_SIZE 16 + +enum mod_hdcp_ddc_message_id { + MOD_HDCP_MESSAGE_ID_INVALID = -1, + + /* HDCP 1.4 */ + + MOD_HDCP_MESSAGE_ID_READ_BKSV = 0, + MOD_HDCP_MESSAGE_ID_READ_RI_R0, + MOD_HDCP_MESSAGE_ID_WRITE_AKSV, + MOD_HDCP_MESSAGE_ID_WRITE_AINFO, + MOD_HDCP_MESSAGE_ID_WRITE_AN, + MOD_HDCP_MESSAGE_ID_READ_VH_X, + MOD_HDCP_MESSAGE_ID_READ_VH_0, + MOD_HDCP_MESSAGE_ID_READ_VH_1, + MOD_HDCP_MESSAGE_ID_READ_VH_2, + MOD_HDCP_MESSAGE_ID_READ_VH_3, + MOD_HDCP_MESSAGE_ID_READ_VH_4, + MOD_HDCP_MESSAGE_ID_READ_BCAPS, + MOD_HDCP_MESSAGE_ID_READ_BSTATUS, + MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, + MOD_HDCP_MESSAGE_ID_READ_BINFO, + + MOD_HDCP_MESSAGE_ID_MAX +}; + +static const uint8_t hdcp_i2c_offsets[] = { + [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x0, + [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x8, + [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x10, + [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x15, + [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x18, + [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x20, + [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x20, + [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x24, + [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x28, + [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x2C, + [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x30, + [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x40, + [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x41, + [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x43, + [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0xFF, +}; + +static const uint32_t hdcp_dpcd_addrs[] = { + [MOD_HDCP_MESSAGE_ID_READ_BKSV] = 0x68000, + [MOD_HDCP_MESSAGE_ID_READ_RI_R0] = 0x68005, + [MOD_HDCP_MESSAGE_ID_WRITE_AKSV] = 0x68007, + [MOD_HDCP_MESSAGE_ID_WRITE_AINFO] = 0x6803B, + [MOD_HDCP_MESSAGE_ID_WRITE_AN] = 0x6800c, + [MOD_HDCP_MESSAGE_ID_READ_VH_X] = 0x68014, + [MOD_HDCP_MESSAGE_ID_READ_VH_0] = 0x68014, + [MOD_HDCP_MESSAGE_ID_READ_VH_1] = 0x68018, + [MOD_HDCP_MESSAGE_ID_READ_VH_2] = 0x6801c, + [MOD_HDCP_MESSAGE_ID_READ_VH_3] = 0x68020, + [MOD_HDCP_MESSAGE_ID_READ_VH_4] = 0x68024, + [MOD_HDCP_MESSAGE_ID_READ_BCAPS] = 0x68028, + [MOD_HDCP_MESSAGE_ID_READ_BSTATUS] = 0x68029, + [MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO] = 0x6802c, + [MOD_HDCP_MESSAGE_ID_READ_BINFO] = 0x6802a, +}; + +static enum mod_hdcp_status read(struct mod_hdcp *hdcp, + enum mod_hdcp_ddc_message_id msg_id, + uint8_t *buf, + uint32_t buf_len) +{ + bool success = true; + 
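+	/*
+	 * A native DP AUX transaction carries at most 16 bytes, so the DPCD
+	 * path below splits reads into HDCP_MAX_AUX_TRANSACTION_SIZE chunks.
+	 */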
uint32_t cur_size = 0; + uint32_t data_offset = 0; + + if (is_dp_hdcp(hdcp)) { + while (buf_len > 0) { + cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE); + success = hdcp->config.ddc.funcs.read_dpcd(hdcp->config.ddc.handle, + hdcp_dpcd_addrs[msg_id] + data_offset, + buf + data_offset, + cur_size); + + if (!success) + break; + + buf_len -= cur_size; + data_offset += cur_size; + } + } else { + success = hdcp->config.ddc.funcs.read_i2c( + hdcp->config.ddc.handle, + HDCP_I2C_ADDR, + hdcp_i2c_offsets[msg_id], + buf, + (uint32_t)buf_len); + } + + return success ? MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE; +} + +static enum mod_hdcp_status read_repeatedly(struct mod_hdcp *hdcp, + enum mod_hdcp_ddc_message_id msg_id, + uint8_t *buf, + uint32_t buf_len, + uint8_t read_size) +{ + enum mod_hdcp_status status = MOD_HDCP_STATUS_DDC_FAILURE; + uint32_t cur_size = 0; + uint32_t data_offset = 0; + + while (buf_len > 0) { + cur_size = MIN(buf_len, read_size); + status = read(hdcp, msg_id, buf + data_offset, cur_size); + + if (status != MOD_HDCP_STATUS_SUCCESS) + break; + + buf_len -= cur_size; + data_offset += cur_size; + } + + return status; +} + +static enum mod_hdcp_status write(struct mod_hdcp *hdcp, + enum mod_hdcp_ddc_message_id msg_id, + uint8_t *buf, + uint32_t buf_len) +{ + bool success = true; + uint32_t cur_size = 0; + uint32_t data_offset = 0; + + if (is_dp_hdcp(hdcp)) { + while (buf_len > 0) { + cur_size = MIN(buf_len, HDCP_MAX_AUX_TRANSACTION_SIZE); + success = hdcp->config.ddc.funcs.write_dpcd( + hdcp->config.ddc.handle, + hdcp_dpcd_addrs[msg_id] + data_offset, + buf + data_offset, + cur_size); + + if (!success) + break; + + buf_len -= cur_size; + data_offset += cur_size; + } + } else { + hdcp->buf[0] = hdcp_i2c_offsets[msg_id]; + memmove(&hdcp->buf[1], buf, buf_len); + success = hdcp->config.ddc.funcs.write_i2c( + hdcp->config.ddc.handle, + HDCP_I2C_ADDR, + hdcp->buf, + (uint32_t)(buf_len+1)); + } + + return success ? 
MOD_HDCP_STATUS_SUCCESS : MOD_HDCP_STATUS_DDC_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_read_bksv(struct mod_hdcp *hdcp) +{ + return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BKSV, + hdcp->auth.msg.hdcp1.bksv, + sizeof(hdcp->auth.msg.hdcp1.bksv)); +} + +enum mod_hdcp_status mod_hdcp_read_bcaps(struct mod_hdcp *hdcp) +{ + return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BCAPS, + &hdcp->auth.msg.hdcp1.bcaps, + sizeof(hdcp->auth.msg.hdcp1.bcaps)); +} + +enum mod_hdcp_status mod_hdcp_read_bstatus(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS, + (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, + 1); + else + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BSTATUS, + (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, + sizeof(hdcp->auth.msg.hdcp1.bstatus)); + return status; +} + +enum mod_hdcp_status mod_hdcp_read_r0p(struct mod_hdcp *hdcp) +{ + return read(hdcp, MOD_HDCP_MESSAGE_ID_READ_RI_R0, + (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, + sizeof(hdcp->auth.msg.hdcp1.r0p)); +} + +/* special case, reading repeatedly at the same address, don't use read() */ +enum mod_hdcp_status mod_hdcp_read_ksvlist(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = read_repeatedly(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, + hdcp->auth.msg.hdcp1.ksvlist, + hdcp->auth.msg.hdcp1.ksvlist_size, + KSV_READ_SIZE); + else + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_KSV_FIFO, + (uint8_t *)&hdcp->auth.msg.hdcp1.ksvlist, + hdcp->auth.msg.hdcp1.ksvlist_size); + return status; +} + +enum mod_hdcp_status mod_hdcp_read_vp(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_0, + &hdcp->auth.msg.hdcp1.vp[0], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_1, + &hdcp->auth.msg.hdcp1.vp[4], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_2, + &hdcp->auth.msg.hdcp1.vp[8], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_3, + &hdcp->auth.msg.hdcp1.vp[12], 4); + if (status != MOD_HDCP_STATUS_SUCCESS) + goto out; + + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_VH_4, + &hdcp->auth.msg.hdcp1.vp[16], 4); +out: + return status; +} + +enum mod_hdcp_status mod_hdcp_read_binfo(struct mod_hdcp *hdcp) +{ + enum mod_hdcp_status status; + + if (is_dp_hdcp(hdcp)) + status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_BINFO, + (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, + sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); + else + status = MOD_HDCP_STATUS_INVALID_OPERATION; + + return status; +} + +enum mod_hdcp_status mod_hdcp_write_aksv(struct mod_hdcp *hdcp) +{ + return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AKSV, + hdcp->auth.msg.hdcp1.aksv, + sizeof(hdcp->auth.msg.hdcp1.aksv)); +} + +enum mod_hdcp_status mod_hdcp_write_ainfo(struct mod_hdcp *hdcp) +{ + return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AINFO, + &hdcp->auth.msg.hdcp1.ainfo, + sizeof(hdcp->auth.msg.hdcp1.ainfo)); +} + +enum mod_hdcp_status mod_hdcp_write_an(struct mod_hdcp *hdcp) +{ + return write(hdcp, MOD_HDCP_MESSAGE_ID_WRITE_AN, + hdcp->auth.msg.hdcp1.an, + sizeof(hdcp->auth.msg.hdcp1.an)); +} diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c new file mode 100644 index 000000000000..3982ced5f969 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c @@ -0,0 +1,163 @@ +/* + * 
Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + + +#include "hdcp.h" + +void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size, + uint8_t *buf, uint32_t buf_size) +{ + const uint8_t bytes_per_line = 16, + byte_size = 3, + newline_size = 1, + terminator_size = 1; + uint32_t line_count = msg_size / bytes_per_line, + trailing_bytes = msg_size % bytes_per_line; + uint32_t target_size = (byte_size * bytes_per_line + newline_size) * line_count + + byte_size * trailing_bytes + newline_size + terminator_size; + uint32_t buf_pos = 0; + uint32_t i = 0; + + if (buf_size >= target_size) { + for (i = 0; i < msg_size; i++) { + if (i % bytes_per_line == 0) + buf[buf_pos++] = '\n'; + sprintf(&buf[buf_pos], "%02X ", msg[i]); + buf_pos += byte_size; + } + buf[buf_pos++] = '\0'; + } +} + +char *mod_hdcp_status_to_str(int32_t status) +{ + switch (status) { + case MOD_HDCP_STATUS_SUCCESS: + return "MOD_HDCP_STATUS_SUCCESS"; + case MOD_HDCP_STATUS_FAILURE: + return "MOD_HDCP_STATUS_FAILURE"; + case MOD_HDCP_STATUS_RESET_NEEDED: + return "MOD_HDCP_STATUS_RESET_NEEDED"; + case MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND: + return "MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND"; + case MOD_HDCP_STATUS_DISPLAY_NOT_FOUND: + return "MOD_HDCP_STATUS_DISPLAY_NOT_FOUND"; + case MOD_HDCP_STATUS_INVALID_STATE: + return "MOD_HDCP_STATUS_INVALID_STATE"; + case MOD_HDCP_STATUS_NOT_IMPLEMENTED: + return "MOD_HDCP_STATUS_NOT_IMPLEMENTED"; + case MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE: + return "MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE"; + case MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE: + return "MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE"; + case MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE: + return "MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE"; + case MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE: + return "MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER: + return "MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER"; + case MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE: + return "MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE"; + case MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING: + return 
"MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING"; + case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY: + return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY"; + case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION: + return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION"; + case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED: + return "MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED"; + case MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE: + return "MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE"; + case MOD_HDCP_STATUS_HDCP1_INVALID_BKSV: + return "MOD_HDCP_STATUS_HDCP1_INVALID_BKSV"; + case MOD_HDCP_STATUS_DDC_FAILURE: + return "MOD_HDCP_STATUS_DDC_FAILURE"; + case MOD_HDCP_STATUS_INVALID_OPERATION: + return "MOD_HDCP_STATUS_INVALID_OPERATION"; + default: + return "MOD_HDCP_STATUS_UNKNOWN"; + } +} + +char *mod_hdcp_state_id_to_str(int32_t id) +{ + switch (id) { + case HDCP_UNINITIALIZED: + return "HDCP_UNINITIALIZED"; + case HDCP_INITIALIZED: + return "HDCP_INITIALIZED"; + case HDCP_CP_NOT_DESIRED: + return "HDCP_CP_NOT_DESIRED"; + case H1_A0_WAIT_FOR_ACTIVE_RX: + return "H1_A0_WAIT_FOR_ACTIVE_RX"; + case H1_A1_EXCHANGE_KSVS: + return "H1_A1_EXCHANGE_KSVS"; + case H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER: + return "H1_A2_COMPUTATIONS_A3_VALIDATE_RX_A6_TEST_FOR_REPEATER"; + case H1_A45_AUTHENTICATED: + return "H1_A45_AUTHENTICATED"; + case H1_A8_WAIT_FOR_READY: + return "H1_A8_WAIT_FOR_READY"; + case H1_A9_READ_KSV_LIST: + return "H1_A9_READ_KSV_LIST"; + case D1_A0_DETERMINE_RX_HDCP_CAPABLE: + return "D1_A0_DETERMINE_RX_HDCP_CAPABLE"; + case D1_A1_EXCHANGE_KSVS: + return "D1_A1_EXCHANGE_KSVS"; + case D1_A23_WAIT_FOR_R0_PRIME: + return "D1_A23_WAIT_FOR_R0_PRIME"; + case D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER: + return "D1_A2_COMPUTATIONS_A3_VALIDATE_RX_A5_TEST_FOR_REPEATER"; + case D1_A4_AUTHENTICATED: + return "D1_A4_AUTHENTICATED"; + case D1_A6_WAIT_FOR_READY: + return "D1_A6_WAIT_FOR_READY"; + case D1_A7_READ_KSV_LIST: + return "D1_A7_READ_KSV_LIST"; + default: + return "UNKNOWN_STATE_ID"; + }; +} + diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h new file mode 100644 index 000000000000..2fd0e0a893ef --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h @@ -0,0 +1,139 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef MOD_HDCP_LOG_H_ +#define MOD_HDCP_LOG_H_ + +#ifdef CONFIG_DRM_AMD_DC_HDCP +#define HDCP_LOG_ERR(hdcp, ...) DRM_ERROR(__VA_ARGS__) +#define HDCP_LOG_VER(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) +#define HDCP_LOG_FSM(hdcp, ...) DRM_DEBUG_KMS(__VA_ARGS__) +#define HDCP_LOG_TOP(hdcp, ...) pr_debug("[HDCP_TOP]:"__VA_ARGS__) +#define HDCP_LOG_DDC(hdcp, ...) pr_debug("[HDCP_DDC]:"__VA_ARGS__) +#endif + +/* default logs */ +#define HDCP_ERROR_TRACE(hdcp, status) \ + HDCP_LOG_ERR(hdcp, \ + "[Link %d] ERROR %s IN STATE %s", \ + hdcp->config.index, \ + mod_hdcp_status_to_str(status), \ + mod_hdcp_state_id_to_str(hdcp->state.id)) +#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \ + HDCP_LOG_VER(hdcp, \ + "[Link %d] HDCP 1.4 enabled on display %d", \ + hdcp->config.index, displayIndex) +/* state machine logs */ +#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d] HDCP_REMOVE_DISPLAY index %d", \ + hdcp->config.index, displayIndex) +#define HDCP_INPUT_PASS_TRACE(hdcp, str) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d]\tPASS %s", \ + hdcp->config.index, str) +#define HDCP_INPUT_FAIL_TRACE(hdcp, str) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d]\tFAIL %s", \ + hdcp->config.index, str) +#define HDCP_NEXT_STATE_TRACE(hdcp, id, output) do { \ + if (output->watchdog_timer_needed) \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d] > %s with %d ms watchdog", \ + hdcp->config.index, \ + mod_hdcp_state_id_to_str(id), output->watchdog_timer_delay); \ + else \ + HDCP_LOG_FSM(hdcp, \ + "[Link %d] > %s", hdcp->config.index, \ + mod_hdcp_state_id_to_str(id)); \ +} while (0) +#define HDCP_TIMEOUT_TRACE(hdcp) \ + HDCP_LOG_FSM(hdcp, "[Link %d] --> TIMEOUT", hdcp->config.index) +#define HDCP_CPIRQ_TRACE(hdcp) \ + HDCP_LOG_FSM(hdcp, "[Link %d] --> CPIRQ", hdcp->config.index) +#define HDCP_EVENT_TRACE(hdcp, event) \ + if (event == MOD_HDCP_EVENT_WATCHDOG_TIMEOUT) \ + HDCP_TIMEOUT_TRACE(hdcp); \ + else if (event == MOD_HDCP_EVENT_CPIRQ) \ + HDCP_CPIRQ_TRACE(hdcp) +/* TODO: find some way to tell if logging is off to save time */ +#define HDCP_DDC_READ_TRACE(hdcp, msg_name, msg, msg_size) do { \ + mod_hdcp_dump_binary_message(msg, msg_size, hdcp->buf, \ + sizeof(hdcp->buf)); \ + HDCP_LOG_DDC(hdcp, "[Link %d] Read %s%s", hdcp->config.index, \ + msg_name, hdcp->buf); \ +} while (0) +#define HDCP_DDC_WRITE_TRACE(hdcp, msg_name, msg, msg_size) do { \ + mod_hdcp_dump_binary_message(msg, msg_size, 
hdcp->buf, \ + sizeof(hdcp->buf)); \ + HDCP_LOG_DDC(hdcp, "[Link %d] Write %s%s", \ + hdcp->config.index, msg_name,\ + hdcp->buf); \ +} while (0) +#define HDCP_FULL_DDC_TRACE(hdcp) do { \ + HDCP_DDC_READ_TRACE(hdcp, "BKSV", hdcp->auth.msg.hdcp1.bksv, \ + sizeof(hdcp->auth.msg.hdcp1.bksv)); \ + HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \ + sizeof(hdcp->auth.msg.hdcp1.bcaps)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \ + sizeof(hdcp->auth.msg.hdcp1.an)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \ + sizeof(hdcp->auth.msg.hdcp1.aksv)); \ + HDCP_DDC_WRITE_TRACE(hdcp, "AINFO", &hdcp->auth.msg.hdcp1.ainfo, \ + sizeof(hdcp->auth.msg.hdcp1.ainfo)); \ + HDCP_DDC_READ_TRACE(hdcp, "RI' / R0'", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.r0p, \ + sizeof(hdcp->auth.msg.hdcp1.r0p)); \ + HDCP_DDC_READ_TRACE(hdcp, "BINFO", \ + (uint8_t *)&hdcp->auth.msg.hdcp1.binfo_dp, \ + sizeof(hdcp->auth.msg.hdcp1.binfo_dp)); \ + HDCP_DDC_READ_TRACE(hdcp, "KSVLIST", hdcp->auth.msg.hdcp1.ksvlist, \ + hdcp->auth.msg.hdcp1.ksvlist_size); \ + HDCP_DDC_READ_TRACE(hdcp, "V'", hdcp->auth.msg.hdcp1.vp, \ + sizeof(hdcp->auth.msg.hdcp1.vp)); \ +} while (0) +#define HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, i) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tadd display %d", \ + hdcp->config.index, i) +#define HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, i) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tremove display %d", \ + hdcp->config.index, i) +#define HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\tdestroy hdcp1 session", \ + hdcp->config.index) +#define HDCP_TOP_RESET_AUTH_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\treset authentication", hdcp->config.index) +#define HDCP_TOP_RESET_CONN_TRACE(hdcp) \ + HDCP_LOG_TOP(hdcp, "[Link %d]\treset connection", hdcp->config.index) +#define HDCP_TOP_INTERFACE_TRACE(hdcp) do { \ + HDCP_LOG_TOP(hdcp, "\n"); \ + HDCP_LOG_TOP(hdcp, "[Link %d] %s", hdcp->config.index, __func__); \ +} while (0) +#define HDCP_TOP_INTERFACE_TRACE_WITH_INDEX(hdcp, i) do { \ + HDCP_LOG_TOP(hdcp, "\n"); \ + HDCP_LOG_TOP(hdcp, "[Link %d] %s display %d", hdcp->config.index, __func__, i); \ +} while (0) + +#endif // MOD_HDCP_LOG_H_ diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c new file mode 100644 index 000000000000..646d909bbc37 --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -0,0 +1,328 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#define MAX_NUM_DISPLAYS 24 + + +#include "hdcp.h" + +#include "amdgpu.h" +#include "hdcp_psp.h" + +enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_dtm_shared_memory *dtm_cmd; + struct mod_hdcp_display *display = NULL; + uint8_t i; + + dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED) { + + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); + + display = &hdcp->connection.displays[i]; + + dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; + dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index; + dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0; + dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; + + psp_dtm_invoke(psp, dtm_cmd->cmd_id); + + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) + return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + + display->state = MOD_HDCP_DISPLAY_ACTIVE; + HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index); + } + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_dtm_shared_memory *dtm_cmd; + struct mod_hdcp_display *display = NULL; + struct mod_hdcp_link *link = &hdcp->connection.link; + uint8_t i; + + if (!psp->dtm_context.dtm_initialized) { + DRM_ERROR("Failed to add display topology, DTM TA is not initialized."); + return MOD_HDCP_STATUS_FAILURE; + } + + dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) { + display = &hdcp->connection.displays[i]; + + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); + + dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2; + dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index; + dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1; + dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller; + dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line; + dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be; + dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe; + dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id; + dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version = + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x; + dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; + + psp_dtm_invoke(psp, dtm_cmd->cmd_id); + + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) + return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE; + + display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED; + HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index); + } + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct mod_hdcp_display *display = get_first_added_display(hdcp); + struct ta_hdcp_shared_memory *hdcp_cmd; + + if 
(!psp->hdcp_context.hdcp_initialized) { + DRM_ERROR("Failed to create hdcp session. HDCP TA is not initialized."); + return MOD_HDCP_STATUS_FAILURE; + } + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_create_session.display_handle = display->index; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_CREATE_SESSION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE; + + hdcp->auth.id = hdcp_cmd->out_msg.hdcp1_create_session.session_handle; + hdcp->auth.msg.hdcp1.ainfo = hdcp_cmd->out_msg.hdcp1_create_session.ainfo_primary; + memcpy(hdcp->auth.msg.hdcp1.aksv, hdcp_cmd->out_msg.hdcp1_create_session.aksv_primary, + sizeof(hdcp->auth.msg.hdcp1.aksv)); + memcpy(hdcp->auth.msg.hdcp1.an, hdcp_cmd->out_msg.hdcp1_create_session.an_primary, + sizeof(hdcp->auth.msg.hdcp1.an)); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_destroy_session.session_handle = hdcp->auth.id; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE; + + HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp); + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_first_part_authentication.session_handle = hdcp->auth.id; + + memcpy(hdcp_cmd->in_msg.hdcp1_first_part_authentication.bksv_primary, hdcp->auth.msg.hdcp1.bksv, + TA_HDCP__HDCP1_KSV_SIZE); + + hdcp_cmd->in_msg.hdcp1_first_part_authentication.r0_prime_primary = hdcp->auth.msg.hdcp1.r0p; + hdcp_cmd->in_msg.hdcp1_first_part_authentication.bcaps = hdcp->auth.msg.hdcp1.bcaps; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; + + if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE) { + /* needs second part of authentication */ + hdcp->connection.is_repeater = 1; + } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status == + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) { + hdcp->connection.is_repeater = 0; + } else + return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE; + + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + struct mod_hdcp_display *display = get_first_added_display(hdcp); + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct 
ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION; + + if (!is_dp_mst_hdcp(hdcp)) { + display->state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP1_ENABLED_TRACE(hdcp, display->index); + } + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_second_part_authentication.session_handle = hdcp->auth.id; + + hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list_size = hdcp->auth.msg.hdcp1.ksvlist_size; + memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.ksv_list, hdcp->auth.msg.hdcp1.ksvlist, + hdcp->auth.msg.hdcp1.ksvlist_size); + + memcpy(hdcp_cmd->in_msg.hdcp1_second_part_authentication.v_prime, hdcp->auth.msg.hdcp1.vp, + sizeof(hdcp->auth.msg.hdcp1.vp)); + + hdcp_cmd->in_msg.hdcp1_second_part_authentication.bstatus_binfo = + is_dp_hdcp(hdcp) ? hdcp->auth.msg.hdcp1.binfo_dp : hdcp->auth.msg.hdcp1.bstatus; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE; + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp) +{ + + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + int i = 0; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + + for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) { + + if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED || + hdcp->connection.displays[i].adjust.disable) + continue; + + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id; + hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE; + + hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED; + HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index); + } + + return MOD_HDCP_STATUS_SUCCESS; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_link_maintenance(struct mod_hdcp *hdcp) +{ + struct psp_context *psp = hdcp->config.psp.handle; + struct ta_hdcp_shared_memory *hdcp_cmd; + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf; + + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->in_msg.hdcp1_get_encryption_status.session_handle = hdcp->auth.id; + + hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level = 0; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return 
MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE; + + return (hdcp_cmd->out_msg.hdcp1_get_encryption_status.protection_level == 1) + ? MOD_HDCP_STATUS_SUCCESS + : MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE; +} + +enum mod_hdcp_status mod_hdcp_hdcp1_get_link_encryption_status(struct mod_hdcp *hdcp, + enum mod_hdcp_encryption_status *encryption_status) +{ + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + if (mod_hdcp_hdcp1_link_maintenance(hdcp) != MOD_HDCP_STATUS_SUCCESS) + return MOD_HDCP_STATUS_FAILURE; + + *encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON; + + return MOD_HDCP_STATUS_SUCCESS; +} + diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h new file mode 100644 index 000000000000..986fc07ea9ea --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h @@ -0,0 +1,272 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef MODULES_HDCP_HDCP_PSP_H_ +#define MODULES_HDCP_HDCP_PSP_H_ + +/* + * NOTE: These parameters are a one-to-one copy of the + * parameters required by PSP + */ +enum bgd_security_hdcp_encryption_level { + HDCP_ENCRYPTION_LEVEL__INVALID = 0, + HDCP_ENCRYPTION_LEVEL__OFF, + HDCP_ENCRYPTION_LEVEL__ON +}; + +enum ta_dtm_command { + TA_DTM_COMMAND__UNUSED_1 = 1, + TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2, + TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE +}; + +/* DTM related enumerations */ +/**********************************************************/ + +enum ta_dtm_status { + TA_DTM_STATUS__SUCCESS = 0x00, + TA_DTM_STATUS__GENERIC_FAILURE = 0x01, + TA_DTM_STATUS__INVALID_PARAMETER = 0x02, + TA_DTM_STATUS__NULL_POINTER = 0x3 +}; + +/* input/output structures for DTM commands */ +/**********************************************************/ +/** + * Input structures + */ +enum ta_dtm_hdcp_version_max_supported { + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__NONE = 0, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__1_x = 10, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_0 = 20, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_1 = 21, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2 = 22, + TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_3 = 23 +}; + +struct ta_dtm_topology_update_input_v2 { + /* display handle is unique across the driver and is used to identify a display */ + /* for all security interfaces which reference displays such as HDCP */ + uint32_t display_handle; + uint32_t is_active; + uint32_t is_miracast; + uint32_t controller; + uint32_t ddc_line; + uint32_t dig_be; + uint32_t dig_fe; + uint32_t dp_mst_vcid; + uint32_t is_assr; + uint32_t max_hdcp_supported_version; +}; + +struct ta_dtm_topology_assr_enable { + uint32_t display_topology_dig_be_index; +}; + +/** + * Output structures + */ + +/* No output structures yet */ + +union ta_dtm_cmd_input { + struct ta_dtm_topology_update_input_v2 topology_update_v2; + struct ta_dtm_topology_assr_enable topology_assr_enable; +}; + +union ta_dtm_cmd_output { + uint32_t reserved; +}; + +struct ta_dtm_shared_memory { + uint32_t cmd_id; + uint32_t resp_id; + enum ta_dtm_status dtm_status; + uint32_t reserved; + union ta_dtm_cmd_input dtm_in_message; + union ta_dtm_cmd_output dtm_out_message; +}; + +int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd, + uint64_t fence_mc_addr); + +enum ta_hdcp_command { + TA_HDCP_COMMAND__INITIALIZE, + TA_HDCP_COMMAND__HDCP1_CREATE_SESSION, + TA_HDCP_COMMAND__HDCP1_DESTROY_SESSION, + TA_HDCP_COMMAND__HDCP1_FIRST_PART_AUTHENTICATION, + TA_HDCP_COMMAND__HDCP1_SECOND_PART_AUTHENTICATION, + TA_HDCP_COMMAND__HDCP1_ENABLE_ENCRYPTION, + TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION, + TA_HDCP_COMMAND__HDCP1_GET_ENCRYPTION_STATUS, +}; + + +/* HDCP related enumerations */ +/**********************************************************/ +#define TA_HDCP__INVALID_SESSION 0xFFFF +#define TA_HDCP__HDCP1_AN_SIZE 8 +#define TA_HDCP__HDCP1_KSV_SIZE 5 +#define TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES 127 +#define TA_HDCP__HDCP1_V_PRIME_SIZE 20 + +enum ta_hdcp_status { + TA_HDCP_STATUS__SUCCESS = 0x00, + TA_HDCP_STATUS__GENERIC_FAILURE = 0x01, + TA_HDCP_STATUS__NULL_POINTER = 0x02, + TA_HDCP_STATUS__FAILED_ALLOCATING_SESSION = 0x03, + TA_HDCP_STATUS__FAILED_SETUP_TX = 0x04, + TA_HDCP_STATUS__INVALID_PARAMETER = 0x05, + TA_HDCP_STATUS__VHX_ERROR = 0x06, + TA_HDCP_STATUS__SESSION_NOT_CLOSED_PROPERLY = 0x07, + TA_HDCP_STATUS__SRM_FAILURE = 0x08, + TA_HDCP_STATUS__MST_AUTHENTICATED_ALREADY_STARTED = 0x09, + 
TA_HDCP_STATUS__AKE_SEND_CERT_FAILURE = 0x0A, + TA_HDCP_STATUS__AKE_NO_STORED_KM_FAILURE = 0x0B, + TA_HDCP_STATUS__AKE_SEND_HPRIME_FAILURE = 0x0C, + TA_HDCP_STATUS__LC_SEND_LPRIME_FAILURE = 0x0D, + TA_HDCP_STATUS__SKE_SEND_EKS_FAILURE = 0x0E, + TA_HDCP_STATUS__REPAUTH_SEND_RXIDLIST_FAILURE = 0x0F, + TA_HDCP_STATUS__REPAUTH_STREAM_READY_FAILURE = 0x10, + TA_HDCP_STATUS__ASD_GENERIC_FAILURE = 0x11, + TA_HDCP_STATUS__UNWRAP_SECRET_FAILURE = 0x12, + TA_HDCP_STATUS__ENABLE_ENCR_FAILURE = 0x13, + TA_HDCP_STATUS__DISABLE_ENCR_FAILURE = 0x14, + TA_HDCP_STATUS__NOT_ENOUGH_MEMORY_FAILURE = 0x15, + TA_HDCP_STATUS__UNKNOWN_MESSAGE = 0x16, + TA_HDCP_STATUS__TOO_MANY_STREAM = 0x17 +}; + +enum ta_hdcp_authentication_status { + TA_HDCP_AUTHENTICATION_STATUS__NOT_STARTED = 0x00, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_FAILED = 0x01, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_FIRST_PART_COMPLETE = 0x02, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_SECOND_PART_FAILED = 0x03, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED = 0x04, + TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09 +}; + + +/* input/output structures for HDCP commands */ +/**********************************************************/ +struct ta_hdcp_cmd_hdcp1_create_session_input { + uint8_t display_handle; +}; + +struct ta_hdcp_cmd_hdcp1_create_session_output { + uint32_t session_handle; + uint8_t an_primary[TA_HDCP__HDCP1_AN_SIZE]; + uint8_t aksv_primary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t ainfo_primary; + uint8_t an_secondary[TA_HDCP__HDCP1_AN_SIZE]; + uint8_t aksv_secondary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t ainfo_secondary; +}; + +struct ta_hdcp_cmd_hdcp1_destroy_session_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp1_first_part_authentication_input { + uint32_t session_handle; + uint8_t bksv_primary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t bksv_secondary[TA_HDCP__HDCP1_KSV_SIZE]; + uint8_t bcaps; + uint16_t r0_prime_primary; + uint16_t r0_prime_secondary; +}; + +struct ta_hdcp_cmd_hdcp1_first_part_authentication_output { + enum ta_hdcp_authentication_status authentication_status; +}; + +struct ta_hdcp_cmd_hdcp1_second_part_authentication_input { + uint32_t session_handle; + uint16_t bstatus_binfo; + uint8_t ksv_list[TA_HDCP__HDCP1_KSV_LIST_MAX_ENTRIES][TA_HDCP__HDCP1_KSV_SIZE]; + uint32_t ksv_list_size; + uint8_t pj_prime; + uint8_t v_prime[TA_HDCP__HDCP1_V_PRIME_SIZE]; +}; + +struct ta_hdcp_cmd_hdcp1_second_part_authentication_output { + enum ta_hdcp_authentication_status authentication_status; +}; + +struct ta_hdcp_cmd_hdcp1_enable_encryption_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input { + uint32_t session_handle; + uint32_t display_handle; +}; + +struct ta_hdcp_cmd_hdcp1_get_encryption_status_input { + uint32_t session_handle; +}; + +struct ta_hdcp_cmd_hdcp1_get_encryption_status_output { + uint32_t protection_level; +}; + +/**********************************************************/ +/* Common input structure for HDCP callbacks */ +union ta_hdcp_cmd_input { + struct ta_hdcp_cmd_hdcp1_create_session_input hdcp1_create_session; + struct ta_hdcp_cmd_hdcp1_destroy_session_input hdcp1_destroy_session; + struct ta_hdcp_cmd_hdcp1_first_part_authentication_input hdcp1_first_part_authentication; + struct ta_hdcp_cmd_hdcp1_second_part_authentication_input hdcp1_second_part_authentication; + struct ta_hdcp_cmd_hdcp1_enable_encryption_input hdcp1_enable_encryption; + struct ta_hdcp_cmd_hdcp1_enable_dp_stream_encryption_input 
hdcp1_enable_dp_stream_encryption; + struct ta_hdcp_cmd_hdcp1_get_encryption_status_input hdcp1_get_encryption_status; +}; + +/* Common output structure for HDCP callbacks */ +union ta_hdcp_cmd_output { + struct ta_hdcp_cmd_hdcp1_create_session_output hdcp1_create_session; + struct ta_hdcp_cmd_hdcp1_first_part_authentication_output hdcp1_first_part_authentication; + struct ta_hdcp_cmd_hdcp1_second_part_authentication_output hdcp1_second_part_authentication; + struct ta_hdcp_cmd_hdcp1_get_encryption_status_output hdcp1_get_encryption_status; +}; +/**********************************************************/ + +struct ta_hdcp_shared_memory { + uint32_t cmd_id; + enum ta_hdcp_status hdcp_status; + uint32_t reserved; + union ta_hdcp_cmd_input in_msg; + union ta_hdcp_cmd_output out_msg; +}; + +enum psp_status { + PSP_STATUS__SUCCESS = 0, + PSP_STATUS__ERROR_INVALID_PARAMS, + PSP_STATUS__ERROR_GENERIC, + PSP_STATUS__ERROR_OUT_OF_MEMORY, + PSP_STATUS__ERROR_UNSUPPORTED_FEATURE +}; + +#endif /* MODULES_HDCP_HDCP_PSP_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h index dc187844d10b..dbe7835aabcf 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_freesync.h @@ -92,6 +92,7 @@ struct mod_vrr_params_btr { uint32_t inserted_duration_in_us; uint32_t frames_to_insert; uint32_t frame_counter; + uint32_t margin_in_us; }; struct mod_vrr_params_fixed_refresh { diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h new file mode 100644 index 000000000000..dea21702edff --- /dev/null +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h @@ -0,0 +1,289 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef MOD_HDCP_H_ +#define MOD_HDCP_H_ + +#include "os_types.h" +#include "signal_types.h" + +/* Forward Declarations */ +struct mod_hdcp; + +#define MAX_NUM_OF_DISPLAYS 6 +#define MAX_NUM_OF_ATTEMPTS 4 +#define MAX_NUM_OF_ERROR_TRACE 10 + +/* detailed return status */ +enum mod_hdcp_status { + MOD_HDCP_STATUS_SUCCESS = 0, + MOD_HDCP_STATUS_FAILURE, + MOD_HDCP_STATUS_RESET_NEEDED, + MOD_HDCP_STATUS_DISPLAY_OUT_OF_BOUND, + MOD_HDCP_STATUS_DISPLAY_NOT_FOUND, + MOD_HDCP_STATUS_INVALID_STATE, + MOD_HDCP_STATUS_NOT_IMPLEMENTED, + MOD_HDCP_STATUS_INTERNAL_POLICY_FAILURE, + MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE, + MOD_HDCP_STATUS_CREATE_PSP_SERVICE_FAILURE, + MOD_HDCP_STATUS_DESTROY_PSP_SERVICE_FAILURE, + MOD_HDCP_STATUS_HDCP1_CREATE_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP1_VALIDATE_ENCRYPTION_FAILURE, + MOD_HDCP_STATUS_HDCP1_NOT_HDCP_REPEATER, + MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE, + MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING, + MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE, + MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY, + MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE, + MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION, + MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE, + MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE, + MOD_HDCP_STATUS_HDCP1_MAX_DEVS_EXCEEDED_FAILURE, + MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE, + MOD_HDCP_STATUS_HDCP1_LINK_INTEGRITY_FAILURE, + MOD_HDCP_STATUS_HDCP1_REAUTH_REQUEST_ISSUED, + MOD_HDCP_STATUS_HDCP1_LINK_MAINTENANCE_FAILURE, + MOD_HDCP_STATUS_HDCP1_INVALID_BKSV, + MOD_HDCP_STATUS_DDC_FAILURE, /* TODO: specific errors */ + MOD_HDCP_STATUS_INVALID_OPERATION, + MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE, + MOD_HDCP_STATUS_HDCP2_CREATE_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREP_AKE_INIT_FAILURE, + MOD_HDCP_STATUS_HDCP2_AKE_CERT_PENDING, + MOD_HDCP_STATUS_HDCP2_H_PRIME_PENDING, + MOD_HDCP_STATUS_HDCP2_PAIRING_INFO_PENDING, + MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE, + MOD_HDCP_STATUS_HDCP2_VALIDATE_H_PRIME_FAILURE, + MOD_HDCP_STATUS_HDCP2_VALIDATE_PAIRING_INFO_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREP_LC_INIT_FAILURE, + MOD_HDCP_STATUS_HDCP2_L_PRIME_PENDING, + MOD_HDCP_STATUS_HDCP2_VALIDATE_L_PRIME_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREP_EKS_FAILURE, + MOD_HDCP_STATUS_HDCP2_ENABLE_ENCRYPTION_FAILURE, + MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_NOT_READY, + MOD_HDCP_STATUS_HDCP2_VALIDATE_RX_ID_LIST_FAILURE, + MOD_HDCP_STATUS_HDCP2_ENABLE_STREAM_ENCRYPTION, + MOD_HDCP_STATUS_HDCP2_STREAM_READY_PENDING, + MOD_HDCP_STATUS_HDCP2_VALIDATE_STREAM_READY_FAILURE, + MOD_HDCP_STATUS_HDCP2_PREPARE_STREAM_MANAGEMENT_FAILURE, + MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST, + MOD_HDCP_STATUS_HDCP2_REAUTH_LINK_INTEGRITY_FAILURE, + MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE, +}; + +struct mod_hdcp_displayport { + uint8_t rev; + uint8_t assr_supported; +}; + +struct mod_hdcp_hdmi { + uint8_t reserved; +}; +enum mod_hdcp_operation_mode { + MOD_HDCP_MODE_OFF, + MOD_HDCP_MODE_DEFAULT, + MOD_HDCP_MODE_DP, + MOD_HDCP_MODE_DP_MST +}; + +enum mod_hdcp_display_state { + MOD_HDCP_DISPLAY_INACTIVE = 0, + MOD_HDCP_DISPLAY_ACTIVE, + MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED, + MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED +}; + +struct mod_hdcp_ddc { + void *handle; + struct { + bool (*read_i2c)(void *handle, + uint32_t address, + uint8_t offset, + uint8_t *data, + uint32_t size); + bool (*write_i2c)(void *handle, + uint32_t address, + const uint8_t *data, + uint32_t size); + bool 
(*read_dpcd)(void *handle, + uint32_t address, + uint8_t *data, + uint32_t size); + bool (*write_dpcd)(void *handle, + uint32_t address, + const uint8_t *data, + uint32_t size); + } funcs; +}; + +struct mod_hdcp_psp { + void *handle; + void *funcs; +}; + +struct mod_hdcp_display_adjustment { + uint8_t disable : 1; + uint8_t reserved : 7; +}; + +struct mod_hdcp_link_adjustment_hdcp1 { + uint8_t disable : 1; + uint8_t postpone_encryption : 1; + uint8_t reserved : 6; +}; + +struct mod_hdcp_link_adjustment_hdcp2 { + uint8_t disable : 1; + uint8_t disable_type1 : 1; + uint8_t force_no_stored_km : 1; + uint8_t increase_h_prime_timeout: 1; + uint8_t reserved : 4; +}; + +struct mod_hdcp_link_adjustment { + uint8_t auth_delay; + struct mod_hdcp_link_adjustment_hdcp1 hdcp1; + struct mod_hdcp_link_adjustment_hdcp2 hdcp2; +}; + +struct mod_hdcp_error { + enum mod_hdcp_status status; + uint8_t state_id; +}; + +struct mod_hdcp_trace { + struct mod_hdcp_error errors[MAX_NUM_OF_ERROR_TRACE]; + uint8_t error_count; +}; + +enum mod_hdcp_encryption_status { + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF = 0, + MOD_HDCP_ENCRYPTION_STATUS_HDCP1_ON, + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON, + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON +}; + +/* per link events dm has to notify to hdcp module */ +enum mod_hdcp_event { + MOD_HDCP_EVENT_CALLBACK = 0, + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT, + MOD_HDCP_EVENT_CPIRQ +}; + +/* output flags from module requesting timer operations */ +struct mod_hdcp_output { + uint8_t callback_needed; + uint8_t callback_stop; + uint8_t watchdog_timer_needed; + uint8_t watchdog_timer_stop; + uint16_t callback_delay; + uint16_t watchdog_timer_delay; +}; + +/* used to represent per display info */ +struct mod_hdcp_display { + enum mod_hdcp_display_state state; + uint8_t index; + uint8_t controller; + uint8_t dig_fe; + union { + uint8_t vc_id; + }; + struct mod_hdcp_display_adjustment adjust; +}; + +/* used to represent per link info */ +/* in case a link has multiple displays, they share the same link info */ +struct mod_hdcp_link { + enum mod_hdcp_operation_mode mode; + uint8_t dig_be; + uint8_t ddc_line; + union { + struct mod_hdcp_displayport dp; + struct mod_hdcp_hdmi hdmi; + }; + struct mod_hdcp_link_adjustment adjust; +}; + +/* a query structure for a display's hdcp information */ +struct mod_hdcp_display_query { + const struct mod_hdcp_display *display; + const struct mod_hdcp_link *link; + const struct mod_hdcp_trace *trace; + enum mod_hdcp_encryption_status encryption_status; +}; + +/* contains values per on external display configuration change */ +struct mod_hdcp_config { + struct mod_hdcp_psp psp; + struct mod_hdcp_ddc ddc; + uint8_t index; +}; + +struct mod_hdcp; + +/* dm allocates memory of mod_hdcp per dc_link on dm init based on memory size*/ +size_t mod_hdcp_get_memory_size(void); + +/* called per link on link creation */ +enum mod_hdcp_status mod_hdcp_setup(struct mod_hdcp *hdcp, + struct mod_hdcp_config *config); + +/* called per link on link destroy */ +enum mod_hdcp_status mod_hdcp_teardown(struct mod_hdcp *hdcp); + +/* called per display on cp_desired set to true */ +enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp, + struct mod_hdcp_link *link, struct mod_hdcp_display *display, + struct mod_hdcp_output *output); + +/* called per display on cp_desired set to false */ +enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_output *output); + +/* called to query hdcp information on a specific index */ +enum 
mod_hdcp_status mod_hdcp_query_display(struct mod_hdcp *hdcp, + uint8_t index, struct mod_hdcp_display_query *query); + +/* called per link on connectivity change */ +enum mod_hdcp_status mod_hdcp_reset_connection(struct mod_hdcp *hdcp, + struct mod_hdcp_output *output); + +/* called per link on events (i.e. callback, watchdog, CP_IRQ) */ +enum mod_hdcp_status mod_hdcp_process_event(struct mod_hdcp *hdcp, + enum mod_hdcp_event event, struct mod_hdcp_output *output); + +/* called to convert enum mod_hdcp_status to c string */ +char *mod_hdcp_status_to_str(int32_t status); + +/* called to convert state id to c string */ +char *mod_hdcp_state_id_to_str(int32_t id); + +/* called to convert signal type to operation mode */ +enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode( + enum signal_type signal); +#endif /* MOD_HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h index d930bdecb117..ca8ce3c55337 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h @@ -35,4 +35,7 @@ struct mod_vrr_params; void mod_build_vsc_infopacket(const struct dc_stream_state *stream, struct dc_info_packet *info_packet); +void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream, + struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue); + #endif diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index d885d642ed7f..db6b08f6d093 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -31,6 +31,7 @@ #include "dc.h" #define HDMI_INFOFRAME_TYPE_VENDOR 0x81 +#define HF_VSIF_VERSION 1 // VTEM Byte Offset #define VTEM_PB0 0 @@ -395,3 +396,100 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, } +/** + ***************************************************************************** + * Function: mod_build_hf_vsif_infopacket + * + * @brief + * Prepare HDMI Vendor Specific info frame. + * Follows HDMI Spec to build up Vendor Specific info frame + * + * @param [in] stream: contains data we may need to construct VSIF (i.e. timing_3d_format, etc.) 
+ * @param [out] info_packet: output structure where to store VSIF + ***************************************************************************** + */ +void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream, + struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue) +{ + unsigned int length = 5; + bool hdmi_vic_mode = false; + uint8_t checksum = 0; + uint32_t i = 0; + enum dc_timing_3d_format format; + bool bALLM = (bool)ALLMEnabled; + bool bALLMVal = (bool)ALLMValue; + + info_packet->valid = false; + format = stream->timing.timing_3d_format; + if (stream->view_format == VIEW_3D_FORMAT_NONE) + format = TIMING_3D_FORMAT_NONE; + + if (stream->timing.hdmi_vic != 0 + && stream->timing.h_total >= 3840 + && stream->timing.v_total >= 2160 + && format == TIMING_3D_FORMAT_NONE) + hdmi_vic_mode = true; + + if ((format == TIMING_3D_FORMAT_NONE) && !hdmi_vic_mode && !bALLM) + return; + + info_packet->sb[1] = 0x03; + info_packet->sb[2] = 0x0C; + info_packet->sb[3] = 0x00; + + if (bALLM) { + info_packet->sb[1] = 0xD8; + info_packet->sb[2] = 0x5D; + info_packet->sb[3] = 0xC4; + info_packet->sb[4] = HF_VSIF_VERSION; + } + + if (format != TIMING_3D_FORMAT_NONE) + info_packet->sb[4] = (2 << 5); + + else if (hdmi_vic_mode) + info_packet->sb[4] = (1 << 5); + + switch (format) { + case TIMING_3D_FORMAT_HW_FRAME_PACKING: + case TIMING_3D_FORMAT_SW_FRAME_PACKING: + info_packet->sb[5] = (0x0 << 4); + break; + + case TIMING_3D_FORMAT_SIDE_BY_SIDE: + case TIMING_3D_FORMAT_SBS_SW_PACKED: + info_packet->sb[5] = (0x8 << 4); + length = 6; + break; + + case TIMING_3D_FORMAT_TOP_AND_BOTTOM: + case TIMING_3D_FORMAT_TB_SW_PACKED: + info_packet->sb[5] = (0x6 << 4); + break; + + default: + break; + } + + if (hdmi_vic_mode) + info_packet->sb[5] = stream->timing.hdmi_vic; + + info_packet->hb0 = HDMI_INFOFRAME_TYPE_VENDOR; + info_packet->hb1 = 0x01; + info_packet->hb2 = (uint8_t) (length); + + if (bALLM) + info_packet->sb[5] = (info_packet->sb[5] & ~0x02) | (bALLMVal << 1); + + checksum += info_packet->hb0; + checksum += info_packet->hb1; + checksum += info_packet->hb2; + + for (i = 1; i <= length; i++) + checksum += info_packet->sb[i]; + + info_packet->sb[0] = (uint8_t) (0x100 - checksum); + + info_packet->valid = true; +} + diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index 05e2be856037..4e2f615c3566 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -80,18 +80,18 @@ struct abm_parameters { static const struct abm_parameters abm_settings_config0[abm_defines_max_level] = { // min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee - {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xE0}, - {0xff, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xE0}, - {0xff, 0x40, 0x20, 0x00, 0xff, 0x90, 0x68, 0x40, 0xE0}, - {0x82, 0x4d, 0x20, 0x00, 0x00, 0x90, 0xb3, 0x70, 0x70}, + {0xff, 0xbf, 0x20, 0x00, 0xff, 0x99, 0xb3, 0x40, 0xe0}, + {0xde, 0x85, 0x20, 0x00, 0xff, 0x90, 0xa8, 0x40, 0xdf}, + {0xb0, 0x50, 0x20, 0x00, 0xc0, 0x88, 0x78, 0x70, 0xa0}, + {0x82, 0x40, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, }; static const struct abm_parameters abm_settings_config1[abm_defines_max_level] = { // min_red max_red bright_pos dark_pos brightness_gain contrast deviation min_knee max_knee - {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70}, - {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70}, - {0x99, 0x65, 0x20, 
0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70}, - {0x82, 0x4d, 0x20, 0x00, 0x00, 0xa8, 0xb3, 0x70, 0x70}, + {0xf0, 0xd9, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, + {0xcd, 0xa5, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, + {0x99, 0x65, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, + {0x82, 0x4d, 0x20, 0x00, 0x00, 0xff, 0xb3, 0x70, 0x70}, }; static const struct abm_parameters * const abm_settings[] = { @@ -115,7 +115,7 @@ static const struct abm_parameters * const abm_settings[] = { /* NOTE: iRAM is 256B in size */ struct iram_table_v_2 { /* flags */ - uint16_t flags; /* 0x00 U16 */ + uint16_t min_abm_backlight; /* 0x00 U16 */ /* parameters for ABM2.0 algorithm */ uint8_t min_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x02 U0.8 */ @@ -140,10 +140,10 @@ struct iram_table_v_2 { /* For reading PSR State directly from IRAM */ uint8_t psr_state; /* 0xf0 */ - uint8_t dmcu_mcp_interface_version; /* 0xf1 */ - uint8_t dmcu_abm_feature_version; /* 0xf2 */ - uint8_t dmcu_psr_feature_version; /* 0xf3 */ - uint16_t dmcu_version; /* 0xf4 */ + uint8_t dmcu_mcp_interface_version; /* 0xf1 */ + uint8_t dmcu_abm_feature_version; /* 0xf2 */ + uint8_t dmcu_psr_feature_version; /* 0xf3 */ + uint16_t dmcu_version; /* 0xf4 */ uint8_t dmcu_state; /* 0xf6 */ uint16_t blRampReduction; /* 0xf7 */ @@ -164,42 +164,43 @@ struct iram_table_v_2_2 { uint8_t max_reduction[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x16 U0.8 */ uint8_t bright_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x2a U2.6 */ uint8_t dark_pos_gain[NUM_AMBI_LEVEL][NUM_AGGR_LEVEL]; /* 0x3e U2.6 */ - uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */ - uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */ - uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */ - uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */ - uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */ - uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */ - uint8_t pad[21]; /* 0x6b U0.8 */ + uint8_t hybrid_factor[NUM_AGGR_LEVEL]; /* 0x52 U0.8 */ + uint8_t contrast_factor[NUM_AGGR_LEVEL]; /* 0x56 U0.8 */ + uint8_t deviation_gain[NUM_AGGR_LEVEL]; /* 0x5a U0.8 */ + uint8_t iir_curve[NUM_AMBI_LEVEL]; /* 0x5e U0.8 */ + uint8_t min_knee[NUM_AGGR_LEVEL]; /* 0x63 U0.8 */ + uint8_t max_knee[NUM_AGGR_LEVEL]; /* 0x67 U0.8 */ + uint16_t min_abm_backlight; /* 0x6b U16 */ + uint8_t pad[19]; /* 0x6d U0.8 */ /* parameters for crgb conversion */ - uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */ - uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */ - uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */ + uint16_t crgb_thresh[NUM_POWER_FN_SEGS]; /* 0x80 U3.13 */ + uint16_t crgb_offset[NUM_POWER_FN_SEGS]; /* 0x90 U1.15 */ + uint16_t crgb_slope[NUM_POWER_FN_SEGS]; /* 0xa0 U4.12 */ /* parameters for custom curve */ /* thresholds for brightness --> backlight */ - uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */ + uint16_t backlight_thresholds[NUM_BL_CURVE_SEGS]; /* 0xb0 U16.0 */ /* offsets for brightness --> backlight */ - uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */ + uint16_t backlight_offsets[NUM_BL_CURVE_SEGS]; /* 0xd0 U16.0 */ /* For reading PSR State directly from IRAM */ - uint8_t psr_state; /* 0xf0 */ - uint8_t dmcu_mcp_interface_version; /* 0xf1 */ - uint8_t dmcu_abm_feature_version; /* 0xf2 */ - uint8_t dmcu_psr_feature_version; /* 0xf3 */ - uint16_t dmcu_version; /* 0xf4 */ - uint8_t dmcu_state; /* 0xf6 */ - - uint8_t dummy1; /* 0xf7 */ - uint8_t dummy2; /* 0xf8 */ - uint8_t dummy3; /* 0xf9 */ - uint8_t dummy4; /* 0xfa */ - uint8_t dummy5; /* 0xfb */ - uint8_t 
dummy6; /* 0xfc */ - uint8_t dummy7; /* 0xfd */ - uint8_t dummy8; /* 0xfe */ - uint8_t dummy9; /* 0xff */ + uint8_t psr_state; /* 0xf0 */ + uint8_t dmcu_mcp_interface_version; /* 0xf1 */ + uint8_t dmcu_abm_feature_version; /* 0xf2 */ + uint8_t dmcu_psr_feature_version; /* 0xf3 */ + uint16_t dmcu_version; /* 0xf4 */ + uint8_t dmcu_state; /* 0xf6 */ + + uint8_t dummy1; /* 0xf7 */ + uint8_t dummy2; /* 0xf8 */ + uint8_t dummy3; /* 0xf9 */ + uint8_t dummy4; /* 0xfa */ + uint8_t dummy5; /* 0xfb */ + uint8_t dummy6; /* 0xfc */ + uint8_t dummy7; /* 0xfd */ + uint8_t dummy8; /* 0xfe */ + uint8_t dummy9; /* 0xff */ }; #pragma pack(pop) @@ -271,7 +272,8 @@ void fill_iram_v_2(struct iram_table_v_2 *ram_table, struct dmcu_iram_parameters { unsigned int set = params.set; - ram_table->flags = 0x0; + ram_table->min_abm_backlight = + cpu_to_be16(params.min_abm_backlight); ram_table->deviation_gain = 0xb3; ram_table->blRampReduction = @@ -445,6 +447,9 @@ void fill_iram_v_2_2(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame ram_table->flags = 0x0; + ram_table->min_abm_backlight = + cpu_to_be16(params.min_abm_backlight); + ram_table->deviation_gain[0] = 0xb3; ram_table->deviation_gain[1] = 0xa8; ram_table->deviation_gain[2] = 0x98; @@ -588,6 +593,10 @@ void fill_iram_v_2_3(struct iram_table_v_2_2 *ram_table, struct dmcu_iram_parame unsigned int set = params.set; ram_table->flags = 0x0; + + ram_table->min_abm_backlight = + cpu_to_be16(params.min_abm_backlight); + for (i = 0; i < NUM_AGGR_LEVEL; i++) { ram_table->hybrid_factor[i] = abm_settings[set][i].brightness_gain; ram_table->contrast_factor[i] = abm_settings[set][i].contrast_factor; diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index da5df00fedce..e54157026330 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -38,6 +38,7 @@ struct dmcu_iram_parameters { unsigned int backlight_lut_array_size; unsigned int backlight_ramping_reduction; unsigned int backlight_ramping_start; + unsigned int min_abm_backlight; unsigned int set; }; diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 8889aaceec60..dc7eb28f0296 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -143,6 +143,8 @@ enum PP_FEATURE_MASK { enum DC_FEATURE_MASK { DC_FBC_MASK = 0x1, DC_MULTI_MON_PP_MCLK_SWITCH_MASK = 0x2, + DC_DISABLE_FRACTIONAL_PWM_MASK = 0x4, + DC_PSR_MASK = 0x8, }; enum amd_dpm_forced_level; diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h index a761ba07f937..fce965984e76 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_d.h @@ -27,6 +27,7 @@ #define mmMM_INDEX 0x0 #define mmMM_INDEX_HI 0x6 #define mmMM_DATA 0x1 +#define mmCC_BIF_BX_FUSESTRAP0 0x14D7 #define mmBUS_CNTL 0x1508 #define mmCONFIG_CNTL 0x1509 #define mmCONFIG_MEMSIZE 0x150a diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h index 8fbfd0261d27..39cc4880beb4 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_4_1_sh_mask.h @@ -32,6 +32,8 @@ #define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 #define MM_DATA__MM_DATA_MASK 0xffffffff #define 
MM_DATA__MM_DATA__SHIFT 0x0 +#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2 +#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1 #define BUS_CNTL__BIOS_ROM_WRT_EN_MASK 0x1 #define BUS_CNTL__BIOS_ROM_WRT_EN__SHIFT 0x0 #define BUS_CNTL__BIOS_ROM_DIS_MASK 0x2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h index 809759f7bb81..8d05d6ca1c8d 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_d.h @@ -27,6 +27,7 @@ #define mmMM_INDEX 0x0 #define mmMM_INDEX_HI 0x6 #define mmMM_DATA 0x1 +#define mmCC_BIF_BX_FUSESTRAP0 0x14D7 #define mmCC_BIF_BX_STRAP2 0x152A #define mmBIF_MM_INDACCESS_CNTL 0x1500 #define mmBIF_DOORBELL_APER_EN 0x1501 diff --git a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h index adc71b01f793..73435687d049 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/bif/bif_5_0_sh_mask.h @@ -32,6 +32,8 @@ #define MM_INDEX_HI__MM_OFFSET_HI__SHIFT 0x0 #define MM_DATA__MM_DATA_MASK 0xffffffff #define MM_DATA__MM_DATA__SHIFT 0x0 +#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK 0x2 +#define CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE__SHIFT 0x1 #define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS_MASK 0x2 #define BIF_MM_INDACCESS_CNTL__MM_INDACCESS_DIS__SHIFT 0x1 #define BIF_DOORBELL_APER_EN__BIF_DOORBELL_APER_EN_MASK 0x1 diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h index be4249adb356..eddf83ec1c39 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_2_1_0_offset.h @@ -9859,6 +9859,8 @@ #define mmDP0_DP_STEER_FIFO_BASE_IDX 2 #define mmDP0_DP_MSA_MISC 0x210e #define mmDP0_DP_MSA_MISC_BASE_IDX 2 +#define mmDP0_DP_DPHY_INTERNAL_CTRL 0x210f +#define mmDP0_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP0_DP_VID_TIMING 0x2110 #define mmDP0_DP_VID_TIMING_BASE_IDX 2 #define mmDP0_DP_VID_N 0x2111 @@ -10187,6 +10189,8 @@ #define mmDP1_DP_STEER_FIFO_BASE_IDX 2 #define mmDP1_DP_MSA_MISC 0x220e #define mmDP1_DP_MSA_MISC_BASE_IDX 2 +#define mmDP1_DP_DPHY_INTERNAL_CTRL 0x220f +#define mmDP1_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP1_DP_VID_TIMING 0x2210 #define mmDP1_DP_VID_TIMING_BASE_IDX 2 #define mmDP1_DP_VID_N 0x2211 @@ -10515,6 +10519,8 @@ #define mmDP2_DP_STEER_FIFO_BASE_IDX 2 #define mmDP2_DP_MSA_MISC 0x230e #define mmDP2_DP_MSA_MISC_BASE_IDX 2 +#define mmDP2_DP_DPHY_INTERNAL_CTRL 0x230f +#define mmDP2_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP2_DP_VID_TIMING 0x2310 #define mmDP2_DP_VID_TIMING_BASE_IDX 2 #define mmDP2_DP_VID_N 0x2311 @@ -10843,6 +10849,8 @@ #define mmDP3_DP_STEER_FIFO_BASE_IDX 2 #define mmDP3_DP_MSA_MISC 0x240e #define mmDP3_DP_MSA_MISC_BASE_IDX 2 +#define mmDP3_DP_DPHY_INTERNAL_CTRL 0x240f +#define mmDP3_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP3_DP_VID_TIMING 0x2410 #define mmDP3_DP_VID_TIMING_BASE_IDX 2 #define mmDP3_DP_VID_N 0x2411 @@ -11171,6 +11179,8 @@ #define mmDP4_DP_STEER_FIFO_BASE_IDX 2 #define mmDP4_DP_MSA_MISC 0x250e #define mmDP4_DP_MSA_MISC_BASE_IDX 2 +#define mmDP4_DP_DPHY_INTERNAL_CTRL 0x250f +#define mmDP4_DP_DPHY_INTERNAL_CTRL_BASE_IDX 2 #define mmDP4_DP_VID_TIMING 0x2510 #define mmDP4_DP_VID_TIMING_BASE_IDX 2 #define mmDP4_DP_VID_N 0x2511 diff --git 
a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h index ca16d9125fbc..2bfaaa8157d0 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_offset.h @@ -1146,7 +1146,14 @@ #define mmATC_L2_MEM_POWER_LS_BASE_IDX 0 #define mmATC_L2_CGTT_CLK_CTRL 0x080c #define mmATC_L2_CGTT_CLK_CTRL_BASE_IDX 0 - +#define mmATC_L2_CACHE_4K_EDC_INDEX 0x080e +#define mmATC_L2_CACHE_4K_EDC_INDEX_BASE_IDX 0 +#define mmATC_L2_CACHE_2M_EDC_INDEX 0x080f +#define mmATC_L2_CACHE_2M_EDC_INDEX_BASE_IDX 0 +#define mmATC_L2_CACHE_4K_EDC_CNT 0x0810 +#define mmATC_L2_CACHE_4K_EDC_CNT_BASE_IDX 0 +#define mmATC_L2_CACHE_2M_EDC_CNT 0x0811 +#define mmATC_L2_CACHE_2M_EDC_CNT_BASE_IDX 0 // addressBlock: gc_utcl2_vml2pfdec // base address: 0xa100 @@ -1206,7 +1213,14 @@ #define mmVM_L2_CACHE_PARITY_CNTL_BASE_IDX 0 #define mmVM_L2_CGTT_CLK_CTRL 0x085e #define mmVM_L2_CGTT_CLK_CTRL_BASE_IDX 0 - +#define mmVM_L2_MEM_ECC_INDEX 0x0860 +#define mmVM_L2_MEM_ECC_INDEX_BASE_IDX 0 +#define mmVM_L2_WALKER_MEM_ECC_INDEX 0x0861 +#define mmVM_L2_WALKER_MEM_ECC_INDEX_BASE_IDX 0 +#define mmVM_L2_MEM_ECC_CNT 0x0862 +#define mmVM_L2_MEM_ECC_CNT_BASE_IDX 0 +#define mmVM_L2_WALKER_MEM_ECC_CNT 0x0863 +#define mmVM_L2_WALKER_MEM_ECC_CNT_BASE_IDX 0 // addressBlock: gc_utcl2_vml2vcdec // base address: 0xa200 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h index 064c4bb1dc62..d4c613a85352 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_9_0_sh_mask.h @@ -6661,7 +6661,6 @@ #define ATC_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L #define ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L - // addressBlock: gc_utcl2_vml2pfdec //VM_L2_CNTL #define VM_L2_CNTL__ENABLE_L2_CACHE__SHIFT 0x0 @@ -6991,7 +6990,22 @@ #define VM_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK 0x00008000L #define VM_L2_CGTT_CLK_CTRL__SOFT_STALL_OVERRIDE_MASK 0x00FF0000L #define VM_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK 0xFF000000L - +//VM_L2_MEM_ECC_INDEX +#define VM_L2_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define VM_L2_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//VM_L2_WALKER_MEM_ECC_INDEX +#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX__SHIFT 0x0 +#define VM_L2_WALKER_MEM_ECC_INDEX__INDEX_MASK 0x000000FFL +//VM_L2_MEM_ECC_CNT +#define VM_L2_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc +#define VM_L2_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe +#define VM_L2_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L +#define VM_L2_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L +//VM_L2_WALKER_MEM_ECC_CNT +#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT__SHIFT 0xc +#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT__SHIFT 0xe +#define VM_L2_WALKER_MEM_ECC_CNT__SEC_COUNT_MASK 0x00003000L +#define VM_L2_WALKER_MEM_ECC_CNT__DED_COUNT_MASK 0x0000C000L // addressBlock: gc_utcl2_vml2vcdec //VM_CONTEXT0_CNTL diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h index 4bcacf529852..991128bb9476 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_0_smn.h @@ -22,6 +22,9 @@ #ifndef _nbio_7_4_0_SMN_HEADER #define _nbio_7_4_0_SMN_HEADER +// addressBlock: nbio_nbif0_bif_ras_bif_ras_regblk +// base address: 0x10100000 +#define smnBIFL_RAS_CENTRAL_STATUS 0x10139040 #define smnNBIF_MGCG_CTRL_LCLK 0x1013a21c #define smnCPM_CONTROL 0x11180460 @@ 
-53,4 +56,13 @@ #define smnPCIE_RX_NUM_NAK 0x11180038 #define smnPCIE_RX_NUM_NAK_GENERATED 0x1118003c +// addressBlock: nbio_iohub_nb_misc_misc_cfgdec +// base address: 0x13a10000 +#define smnIOHC_INTERRUPT_EOI 0x13a10120 + +// addressBlock: nbio_iohub_nb_rascfg_ras_cfgdec +// base address: 0x13a20000 +#define smnRAS_GLOBAL_STATUS_LO 0x13a20020 +#define smnRAS_GLOBAL_STATUS_HI 0x13a20024 + #endif // _nbio_7_4_0_SMN_HEADER diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h index 994e796a28d7..ce5830ebe095 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_offset.h @@ -2793,8 +2793,8 @@ #define mmBIF_DOORBELL_INT_CNTL_BASE_IDX 2 #define mmBIF_FB_EN 0x00ff #define mmBIF_FB_EN_BASE_IDX 2 -#define mmBIF_BUSY_DELAY_CNTR 0x0100 -#define mmBIF_BUSY_DELAY_CNTR_BASE_IDX 2 +#define mmBIF_INTR_CNTL 0x0100 +#define mmBIF_INTR_CNTL_BASE_IDX 2 #define mmBIF_MST_TRANS_PENDING_VF 0x0109 #define mmBIF_MST_TRANS_PENDING_VF_BASE_IDX 2 #define mmBIF_SLV_TRANS_PENDING_VF 0x010a diff --git a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h index d467b939c971..07f04b2b5bdd 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/nbio/nbio_7_4_sh_mask.h @@ -20420,9 +20420,9 @@ #define BIF_FB_EN__FB_WRITE_EN__SHIFT 0x1 #define BIF_FB_EN__FB_READ_EN_MASK 0x00000001L #define BIF_FB_EN__FB_WRITE_EN_MASK 0x00000002L -//BIF_BUSY_DELAY_CNTR -#define BIF_BUSY_DELAY_CNTR__DELAY_CNT__SHIFT 0x0 -#define BIF_BUSY_DELAY_CNTR__DELAY_CNT_MASK 0x0000003FL +//BIF_INTR_CNTL +#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL__SHIFT 0x0 +#define BIF_INTR_CNTL__RAS_INTR_VEC_SEL_MASK 0x00000001L //BIF_MST_TRANS_PENDING_VF #define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING__SHIFT 0x0 #define BIF_MST_TRANS_PENDING_VF__BIF_MST_TRANS_PENDING_MASK 0x7FFFFFFFL @@ -48436,4 +48436,47 @@ #define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_1_MASK 0x00000002L #define RCC_DEV0_EPF0_VF15_GFXMSIX_PBA__MSIX_PENDING_BITS_2_MASK 0x00000004L +//IOHC_INTERRUPT_EOI +#define IOHC_INTERRUPT_EOI__SMI_EOI__SHIFT 0x0 +#define IOHC_INTERRUPT_EOI__SCI_EOI__SHIFT 0x1 +#define IOHC_INTERRUPT_EOI__NMI_EOI__SHIFT 0x2 +#define IOHC_INTERRUPT_EOI__SMI_EOI_MASK 0x00000001L +#define IOHC_INTERRUPT_EOI__SCI_EOI_MASK 0x00000002L +#define IOHC_INTERRUPT_EOI__NMI_EOI_MASK 0x00000004L + +//RAS_GLOBAL_STATUS_LO +#define RAS_GLOBAL_STATUS_LO__ParityErrCorr__SHIFT 0x0 +#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal__SHIFT 0x1 +#define RAS_GLOBAL_STATUS_LO__ParityErrFatal__SHIFT 0x2 +#define RAS_GLOBAL_STATUS_LO__ParityErrSerr__SHIFT 0x3 +#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI__SHIFT 0x6 +#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI__SHIFT 0x7 +#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI__SHIFT 0x8 +#define RAS_GLOBAL_STATUS_LO__SW_SMI__SHIFT 0x9 +#define RAS_GLOBAL_STATUS_LO__SW_SCI__SHIFT 0xa +#define RAS_GLOBAL_STATUS_LO__SW_NMI__SHIFT 0xb +#define RAS_GLOBAL_STATUS_LO__APML_NMI__SHIFT 0xc +#define RAS_GLOBAL_STATUS_LO__APML_SyncFld__SHIFT 0xd +#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI__SHIFT 0xe +#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private__SHIFT 0xf +#define RAS_GLOBAL_STATUS_LO__ParityErrCorr_MASK 0x00000001L +#define RAS_GLOBAL_STATUS_LO__ParityErrNonFatal_MASK 0x00000002L +#define RAS_GLOBAL_STATUS_LO__ParityErrFatal_MASK 0x00000004L +#define 
RAS_GLOBAL_STATUS_LO__ParityErrSerr_MASK 0x00000008L +#define RAS_GLOBAL_STATUS_LO__HPLGWA_NMI_MASK 0x00000040L +#define RAS_GLOBAL_STATUS_LO__HPLGWA_SCI_MASK 0x00000080L +#define RAS_GLOBAL_STATUS_LO__HPLGWA_SMI_MASK 0x00000100L +#define RAS_GLOBAL_STATUS_LO__SW_SMI_MASK 0x00000200L +#define RAS_GLOBAL_STATUS_LO__SW_SCI_MASK 0x00000400L +#define RAS_GLOBAL_STATUS_LO__SW_NMI_MASK 0x00000800L +#define RAS_GLOBAL_STATUS_LO__APML_NMI_MASK 0x00001000L +#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_MASK 0x00002000L +#define RAS_GLOBAL_STATUS_LO__PIN_SyncFld_NMI_MASK 0x00004000L +#define RAS_GLOBAL_STATUS_LO__APML_SyncFld_Private_MASK 0x00008000L +//RAS_GLOBAL_STATUS_HI +#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr__SHIFT 0x0 +#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr__SHIFT 0x1 +#define RAS_GLOBAL_STATUS_HI__PCIE0PortAErr_MASK 0x00000001L +#define RAS_GLOBAL_STATUS_HI__NBIF0PortAErr_MASK 0x00000002L + #endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h index dc9895a684fe..096d878eb1de 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/oss/osssys_4_0_sh_mask.h @@ -588,11 +588,15 @@ #define IH_STORM_CLIENT_LIST_CNTL__CLIENT30_IS_STORM_CLIENT_MASK 0x40000000L #define IH_STORM_CLIENT_LIST_CNTL__CLIENT31_IS_STORM_CLIENT_MASK 0x80000000L //IH_CLK_CTRL +#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x19 +#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE__SHIFT 0x1a #define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE__SHIFT 0x1b #define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE__SHIFT 0x1c #define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE__SHIFT 0x1d #define IH_CLK_CTRL__DYN_CLK_SOFT_OVERRIDE__SHIFT 0x1e #define IH_CLK_CTRL__REG_CLK_SOFT_OVERRIDE__SHIFT 0x1f +#define IH_CLK_CTRL__IH_RETRY_INT_CAM_MEM_CLK_SOFT_OVERRIDE_MASK 0x02000000L +#define IH_CLK_CTRL__IH_BUFFER_MEM_CLK_SOFT_OVERRIDE_MASK 0x04000000L #define IH_CLK_CTRL__DBUS_MUX_CLK_SOFT_OVERRIDE_MASK 0x08000000L #define IH_CLK_CTRL__OSSSYS_SHARE_CLK_SOFT_OVERRIDE_MASK 0x10000000L #define IH_CLK_CTRL__LIMIT_SMN_CLK_SOFT_OVERRIDE_MASK 0x20000000L diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h index dbc2e723f659..71169daa701a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_d.h @@ -49,6 +49,7 @@ #define ixCG_SPLL_FUNC_CNTL_5 0xc0500150 #define ixCG_SPLL_FUNC_CNTL_6 0xc0500154 #define ixCG_SPLL_FUNC_CNTL_7 0xc0500158 +#define ixCG_SPLL_STATUS 0xC050015C #define ixSPLL_CNTL_MODE 0xc0500160 #define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164 #define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h index 6af9f0217b34..61a9a84e0c3a 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_0_1_sh_mask.h @@ -194,6 +194,8 @@ #define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19 #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0 #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2 diff 
--git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h index bd3685166779..351446754c72 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_d.h @@ -49,6 +49,7 @@ #define ixCG_SPLL_FUNC_CNTL_5 0xc0500150 #define ixCG_SPLL_FUNC_CNTL_6 0xc0500154 #define ixCG_SPLL_FUNC_CNTL_7 0xc0500158 +#define ixCG_SPLL_STATUS 0xC050015C #define ixSPLL_CNTL_MODE 0xc0500160 #define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164 #define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h index 627906674fe8..4bfd5f8ba66c 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_2_sh_mask.h @@ -194,6 +194,8 @@ #define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19 #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0 #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h index f35aba72e640..21da61c398f5 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_d.h @@ -52,6 +52,7 @@ #define ixCG_SPLL_FUNC_CNTL_5 0xc0500150 #define ixCG_SPLL_FUNC_CNTL_6 0xc0500154 #define ixCG_SPLL_FUNC_CNTL_7 0xc0500158 +#define ixCG_SPLL_STATUS 0xC050015C #define ixSPLL_CNTL_MODE 0xc0500160 #define ixCG_SPLL_SPREAD_SPECTRUM 0xc0500164 #define ixCG_SPLL_SPREAD_SPECTRUM_2 0xc0500168 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h index 481ee6560aa9..f64fe0fbcb32 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smu/smu_7_1_3_sh_mask.h @@ -220,6 +220,8 @@ #define CG_SPLL_FUNC_CNTL_6__SPLL_LF_CNTR__SHIFT 0x19 #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL_MASK 0xfff #define CG_SPLL_FUNC_CNTL_7__SPLL_BW_CNTRL__SHIFT 0x0 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK 0x2 +#define CG_SPLL_STATUS__SPLL_CHG_STATUS__SHIFT 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL_MASK 0x1 #define SPLL_CNTL_MODE__SPLL_SW_DIR_CONTROL__SHIFT 0x0 #define SPLL_CNTL_MODE__SPLL_LEGACY_PDIV_MASK 0x2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h index d3876052562b..687d6843c258 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_offset.h @@ -121,6 +121,98 @@ #define mmCKSVII2C_IC_COMP_VERSION_BASE_IDX 0 #define mmCKSVII2C_IC_COMP_TYPE 0x006d #define mmCKSVII2C_IC_COMP_TYPE_BASE_IDX 0 +#define mmCKSVII2C1_IC_CON 0x0080 +#define mmCKSVII2C1_IC_CON_BASE_IDX 0 +#define mmCKSVII2C1_IC_TAR 0x0081 +#define mmCKSVII2C1_IC_TAR_BASE_IDX 0 +#define mmCKSVII2C1_IC_SAR 0x0082 +#define mmCKSVII2C1_IC_SAR_BASE_IDX 0 +#define mmCKSVII2C1_IC_HS_MADDR 0x0083 +#define mmCKSVII2C1_IC_HS_MADDR_BASE_IDX 0 +#define mmCKSVII2C1_IC_DATA_CMD 0x0084 +#define mmCKSVII2C1_IC_DATA_CMD_BASE_IDX 0 
+#define mmCKSVII2C1_IC_SS_SCL_HCNT 0x0085 +#define mmCKSVII2C1_IC_SS_SCL_HCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_SS_SCL_LCNT 0x0086 +#define mmCKSVII2C1_IC_SS_SCL_LCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_FS_SCL_HCNT 0x0087 +#define mmCKSVII2C1_IC_FS_SCL_HCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_FS_SCL_LCNT 0x0088 +#define mmCKSVII2C1_IC_FS_SCL_LCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_HS_SCL_HCNT 0x0089 +#define mmCKSVII2C1_IC_HS_SCL_HCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_HS_SCL_LCNT 0x008a +#define mmCKSVII2C1_IC_HS_SCL_LCNT_BASE_IDX 0 +#define mmCKSVII2C1_IC_INTR_STAT 0x008b +#define mmCKSVII2C1_IC_INTR_STAT_BASE_IDX 0 +#define mmCKSVII2C1_IC_INTR_MASK 0x008c +#define mmCKSVII2C1_IC_INTR_MASK_BASE_IDX 0 +#define mmCKSVII2C1_IC_RAW_INTR_STAT 0x008d +#define mmCKSVII2C1_IC_RAW_INTR_STAT_BASE_IDX 0 +#define mmCKSVII2C1_IC_RX_TL 0x008e +#define mmCKSVII2C1_IC_RX_TL_BASE_IDX 0 +#define mmCKSVII2C1_IC_TX_TL 0x008f +#define mmCKSVII2C1_IC_TX_TL_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_INTR 0x0090 +#define mmCKSVII2C1_IC_CLR_INTR_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RX_UNDER 0x0091 +#define mmCKSVII2C1_IC_CLR_RX_UNDER_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RX_OVER 0x0092 +#define mmCKSVII2C1_IC_CLR_RX_OVER_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_TX_OVER 0x0093 +#define mmCKSVII2C1_IC_CLR_TX_OVER_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RD_REQ 0x0094 +#define mmCKSVII2C1_IC_CLR_RD_REQ_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_TX_ABRT 0x0095 +#define mmCKSVII2C1_IC_CLR_TX_ABRT_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RX_DONE 0x0096 +#define mmCKSVII2C1_IC_CLR_RX_DONE_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_ACTIVITY 0x0097 +#define mmCKSVII2C1_IC_CLR_ACTIVITY_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_STOP_DET 0x0098 +#define mmCKSVII2C1_IC_CLR_STOP_DET_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_START_DET 0x0099 +#define mmCKSVII2C1_IC_CLR_START_DET_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_GEN_CALL 0x009a +#define mmCKSVII2C1_IC_CLR_GEN_CALL_BASE_IDX 0 +#define mmCKSVII2C1_IC_ENABLE 0x009b +#define mmCKSVII2C1_IC_ENABLE_BASE_IDX 0 +#define mmCKSVII2C1_IC_STATUS 0x009c +#define mmCKSVII2C1_IC_STATUS_BASE_IDX 0 +#define mmCKSVII2C1_IC_TXFLR 0x009d +#define mmCKSVII2C1_IC_TXFLR_BASE_IDX 0 +#define mmCKSVII2C1_IC_RXFLR 0x009e +#define mmCKSVII2C1_IC_RXFLR_BASE_IDX 0 +#define mmCKSVII2C1_IC_SDA_HOLD 0x009f +#define mmCKSVII2C1_IC_SDA_HOLD_BASE_IDX 0 +#define mmCKSVII2C1_IC_TX_ABRT_SOURCE 0x00a0 +#define mmCKSVII2C1_IC_TX_ABRT_SOURCE_BASE_IDX 0 +#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY 0x00a1 +#define mmCKSVII2C1_IC_SLV_DATA_NACK_ONLY_BASE_IDX 0 +#define mmCKSVII2C1_IC_DMA_CR 0x00a2 +#define mmCKSVII2C1_IC_DMA_CR_BASE_IDX 0 +#define mmCKSVII2C1_IC_DMA_TDLR 0x00a3 +#define mmCKSVII2C1_IC_DMA_TDLR_BASE_IDX 0 +#define mmCKSVII2C1_IC_DMA_RDLR 0x00a4 +#define mmCKSVII2C1_IC_DMA_RDLR_BASE_IDX 0 +#define mmCKSVII2C1_IC_SDA_SETUP 0x00a5 +#define mmCKSVII2C1_IC_SDA_SETUP_BASE_IDX 0 +#define mmCKSVII2C1_IC_ACK_GENERAL_CALL 0x00a6 +#define mmCKSVII2C1_IC_ACK_GENERAL_CALL_BASE_IDX 0 +#define mmCKSVII2C1_IC_ENABLE_STATUS 0x00a7 +#define mmCKSVII2C1_IC_ENABLE_STATUS_BASE_IDX 0 +#define mmCKSVII2C1_IC_FS_SPKLEN 0x00a8 +#define mmCKSVII2C1_IC_FS_SPKLEN_BASE_IDX 0 +#define mmCKSVII2C1_IC_HS_SPKLEN 0x00a9 +#define mmCKSVII2C1_IC_HS_SPKLEN_BASE_IDX 0 +#define mmCKSVII2C1_IC_CLR_RESTART_DET 0x00aa +#define mmCKSVII2C1_IC_CLR_RESTART_DET_BASE_IDX 0 +#define mmCKSVII2C1_IC_COMP_PARAM_1 0x00ab +#define mmCKSVII2C1_IC_COMP_PARAM_1_BASE_IDX 0 +#define mmCKSVII2C1_IC_COMP_VERSION 0x00ac +#define mmCKSVII2C1_IC_COMP_VERSION_BASE_IDX 0 +#define 
mmCKSVII2C1_IC_COMP_TYPE 0x00ad +#define mmCKSVII2C1_IC_COMP_TYPE_BASE_IDX 0 #define mmSMUIO_MP_RESET_INTR 0x00c1 #define mmSMUIO_MP_RESET_INTR_BASE_IDX 0 #define mmSMUIO_SOC_HALT 0x00c2 diff --git a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h index f8afa3518bf2..6905a9618127 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/smuio/smuio_11_0_0_sh_mask.h @@ -268,6 +268,182 @@ //CKSVII2C_IC_COMP_TYPE #define CKSVII2C_IC_COMP_TYPE__COMP_TYPE__SHIFT 0x0 #define CKSVII2C_IC_COMP_TYPE__COMP_TYPE_MASK 0xFFFFFFFFL +//CKSVII2C1_IC_CON +#define CKSVII2C1_IC_CON__IC1_MASTER_MODE__SHIFT 0x0 +#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE__SHIFT 0x1 +#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE__SHIFT 0x3 +#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER__SHIFT 0x4 +#define CKSVII2C1_IC_CON__IC1_RESTART_EN__SHIFT 0x5 +#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE__SHIFT 0x6 +#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED__SHIFT 0x7 +#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL__SHIFT 0x8 +#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL__SHIFT 0x9 +#define CKSVII2C1_IC_CON__IC1_MASTER_MODE_MASK 0x00000001L +#define CKSVII2C1_IC_CON__IC1_MAX_SPEED_MODE_MASK 0x00000006L +#define CKSVII2C1_IC_CON__IC1_10BITADDR_SLAVE_MASK 0x00000008L +#define CKSVII2C1_IC_CON__IC1_10BITADDR_MASTER_MASK 0x00000010L +#define CKSVII2C1_IC_CON__IC1_RESTART_EN_MASK 0x00000020L +#define CKSVII2C1_IC_CON__IC1_SLAVE_DISABLE_MASK 0x00000040L +#define CKSVII2C1_IC_CON__STOP1_DET_IFADDRESSED_MASK 0x00000080L +#define CKSVII2C1_IC_CON__TX1_EMPTY_CTRL_MASK 0x00000100L +#define CKSVII2C1_IC_CON__RX1_FIFO_FULL_HLD_CTRL_MASK 0x00000200L +//CKSVII2C1_IC_TAR +#define CKSVII2C1_IC_TAR__IC1_TAR__SHIFT 0x0 +#define CKSVII2C1_IC_TAR__GC1_OR_START__SHIFT 0xa +#define CKSVII2C1_IC_TAR__SPECIAL1__SHIFT 0xb +#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER__SHIFT 0xc +#define CKSVII2C1_IC_TAR__IC1_TAR_MASK 0x000003FFL +#define CKSVII2C1_IC_TAR__GC1_OR_START_MASK 0x00000400L +#define CKSVII2C1_IC_TAR__SPECIAL1_MASK 0x00000800L +#define CKSVII2C1_IC_TAR__IC1_10BITADDR_MASTER_MASK 0x00001000L +//CKSVII2C1_IC_SAR +#define CKSVII2C1_IC_SAR__IC1_SAR__SHIFT 0x0 +#define CKSVII2C1_IC_SAR__IC1_SAR_MASK 0x000003FFL +//CKSVII2C1_IC_HS_MADDR +#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR__SHIFT 0x0 +#define CKSVII2C1_IC_HS_MADDR__IC1_HS_MADDR_MASK 0x00000007L +//CKSVII2C1_IC_DATA_CMD +#define CKSVII2C1_IC_DATA_CMD__DAT1__SHIFT 0x0 +#define CKSVII2C1_IC_DATA_CMD__CMD1__SHIFT 0x8 +#define CKSVII2C1_IC_DATA_CMD__STOP1__SHIFT 0x9 +#define CKSVII2C1_IC_DATA_CMD__RESTART1__SHIFT 0xa +#define CKSVII2C1_IC_DATA_CMD__DAT1_MASK 0x000000FFL +#define CKSVII2C1_IC_DATA_CMD__CMD1_MASK 0x00000100L +#define CKSVII2C1_IC_DATA_CMD__STOP1_MASK 0x00000200L +#define CKSVII2C1_IC_DATA_CMD__RESTART1_MASK 0x00000400L +//CKSVII2C1_IC_SS_SCL_HCNT +#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_SS_SCL_HCNT__IC1_SS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_SS_SCL_LCNT +#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C1_IC_SS_SCL_LCNT__IC1_SS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_FS_SCL_HCNT +#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_FS_SCL_HCNT__IC1_FS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_FS_SCL_LCNT +#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C1_IC_FS_SCL_LCNT__IC1_FS_SCL_LCNT_MASK 
0x0000FFFFL +//CKSVII2C1_IC_HS_SCL_HCNT +#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT__SHIFT 0x0 +#define CKSVII2C1_IC_HS_SCL_HCNT__IC1_HS_SCL_HCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_HS_SCL_LCNT +#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT__SHIFT 0x0 +#define CKSVII2C1_IC_HS_SCL_LCNT__IC1_HS_SCL_LCNT_MASK 0x0000FFFFL +//CKSVII2C1_IC_INTR_STAT +#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER__SHIFT 0x0 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER__SHIFT 0x1 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL__SHIFT 0x2 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER__SHIFT 0x3 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ__SHIFT 0x5 +#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT__SHIFT 0x6 +#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE__SHIFT 0x7 +#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY__SHIFT 0x8 +#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET__SHIFT 0x9 +#define CKSVII2C1_IC_INTR_STAT__R1_START_DET__SHIFT 0xa +#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL__SHIFT 0xb +#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET__SHIFT 0xc +#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C1_IC_INTR_STAT__R1_RX_UNDER_MASK 0x00000001L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_OVER_MASK 0x00000002L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_FULL_MASK 0x00000004L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_OVER_MASK 0x00000008L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C1_IC_INTR_STAT__R1_RD_REQ_MASK 0x00000020L +#define CKSVII2C1_IC_INTR_STAT__R1_TX_ABRT_MASK 0x00000040L +#define CKSVII2C1_IC_INTR_STAT__R1_RX_DONE_MASK 0x00000080L +#define CKSVII2C1_IC_INTR_STAT__R1_ACTIVITY_MASK 0x00000100L +#define CKSVII2C1_IC_INTR_STAT__R1_STOP_DET_MASK 0x00000200L +#define CKSVII2C1_IC_INTR_STAT__R1_START_DET_MASK 0x00000400L +#define CKSVII2C1_IC_INTR_STAT__R1_GEN_CALL_MASK 0x00000800L +#define CKSVII2C1_IC_INTR_STAT__R1_RESTART_DET_MASK 0x00001000L +#define CKSVII2C1_IC_INTR_STAT__R1_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C1_IC_INTR_MASK +#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER__SHIFT 0x0 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER__SHIFT 0x1 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL__SHIFT 0x2 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER__SHIFT 0x3 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY__SHIFT 0x4 +#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ__SHIFT 0x5 +#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT__SHIFT 0x6 +#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE__SHIFT 0x7 +#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY__SHIFT 0x8 +#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET__SHIFT 0x9 +#define CKSVII2C1_IC_INTR_MASK__M1_START_DET__SHIFT 0xa +#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL__SHIFT 0xb +#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET__SHIFT 0xc +#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD__SHIFT 0xd +#define CKSVII2C1_IC_INTR_MASK__M1_RX_UNDER_MASK 0x00000001L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_OVER_MASK 0x00000002L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_FULL_MASK 0x00000004L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_OVER_MASK 0x00000008L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_EMPTY_MASK 0x00000010L +#define CKSVII2C1_IC_INTR_MASK__M1_RD_REQ_MASK 0x00000020L +#define CKSVII2C1_IC_INTR_MASK__M1_TX_ABRT_MASK 0x00000040L +#define CKSVII2C1_IC_INTR_MASK__M1_RX_DONE_MASK 0x00000080L +#define CKSVII2C1_IC_INTR_MASK__M1_ACTIVITY_MASK 0x00000100L +#define CKSVII2C1_IC_INTR_MASK__M1_STOP_DET_MASK 0x00000200L +#define CKSVII2C1_IC_INTR_MASK__M1_START_DET_MASK 0x00000400L +#define CKSVII2C1_IC_INTR_MASK__M1_GEN_CALL_MASK 0x00000800L 
+#define CKSVII2C1_IC_INTR_MASK__M1_RESTART_DET_MASK 0x00001000L +#define CKSVII2C1_IC_INTR_MASK__M1_MST_ON_HOLD_MASK 0x00002000L +//CKSVII2C1_IC_RAW_INTR_STAT +//CKSVII2C1_IC_RX_TL +//CKSVII2C1_IC_TX_TL +//CKSVII2C1_IC_CLR_INTR +//CKSVII2C1_IC_CLR_RX_UNDER +//CKSVII2C1_IC_CLR_RX_OVER +//CKSVII2C1_IC_CLR_TX_OVER +//CKSVII2C1_IC_CLR_RD_REQ +//CKSVII2C1_IC_CLR_TX_ABRT +//CKSVII2C1_IC_CLR_RX_DONE +//CKSVII2C1_IC_CLR_ACTIVITY +//CKSVII2C1_IC_CLR_STOP_DET +//CKSVII2C1_IC_CLR_START_DET +//CKSVII2C1_IC_CLR_GEN_CALL +//CKSVII2C1_IC_ENABLE +#define CKSVII2C1_IC_ENABLE__ENABLE1__SHIFT 0x0 +#define CKSVII2C1_IC_ENABLE__ABORT1__SHIFT 0x1 +#define CKSVII2C1_IC_ENABLE__ENABLE1_MASK 0x00000001L +#define CKSVII2C1_IC_ENABLE__ABORT1_MASK 0x00000002L +//CKSVII2C1_IC_STATUS +#define CKSVII2C1_IC_STATUS__ACTIVITY1__SHIFT 0x0 +#define CKSVII2C1_IC_STATUS__TFNF1__SHIFT 0x1 +#define CKSVII2C1_IC_STATUS__TFE1__SHIFT 0x2 +#define CKSVII2C1_IC_STATUS__RFNE1__SHIFT 0x3 +#define CKSVII2C1_IC_STATUS__RFF1__SHIFT 0x4 +#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY__SHIFT 0x5 +#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY__SHIFT 0x6 +#define CKSVII2C1_IC_STATUS__ACTIVITY1_MASK 0x00000001L +#define CKSVII2C1_IC_STATUS__TFNF1_MASK 0x00000002L +#define CKSVII2C1_IC_STATUS__TFE1_MASK 0x00000004L +#define CKSVII2C1_IC_STATUS__RFNE1_MASK 0x00000008L +#define CKSVII2C1_IC_STATUS__RFF1_MASK 0x00000010L +#define CKSVII2C1_IC_STATUS__MST1_ACTIVITY_MASK 0x00000020L +#define CKSVII2C1_IC_STATUS__SLV1_ACTIVITY_MASK 0x00000040L +//CKSVII2C1_IC_TXFLR +//CKSVII2C1_IC_RXFLR +//CKSVII2C1_IC_SDA_HOLD +#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD__SHIFT 0x0 +#define CKSVII2C1_IC_SDA_HOLD__IC1_SDA_HOLD_MASK 0x00FFFFFFL +//CKSVII2C1_IC_TX_ABRT_SOURCE +//CKSVII2C1_IC_SLV_DATA_NACK_ONLY +//CKSVII2C1_IC_DMA_CR +//CKSVII2C1_IC_DMA_TDLR +//CKSVII2C1_IC_DMA_RDLR +//CKSVII2C1_IC_SDA_SETUP +#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP__SHIFT 0x0 +#define CKSVII2C1_IC_SDA_SETUP__SDA1_SETUP_MASK 0x000000FFL +//CKSVII2C1_IC_ACK_GENERAL_CALL +#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL__SHIFT 0x0 +#define CKSVII2C1_IC_ACK_GENERAL_CALL__ACK1_GENERAL_CALL_MASK 0x00000001L +//CKSVII2C1_IC_ENABLE_STATUS +#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN__SHIFT 0x0 +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED__SHIFT 0x1 +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED__SHIFT 0x2 +#define CKSVII2C1_IC_ENABLE_STATUS__IC1_EN_MASK 0x00000001L +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_RX_ABORTED_MASK 0x00000002L +#define CKSVII2C1_IC_ENABLE_STATUS__SLV1_FIFO_FILLED_AND_FLUSHED_MASK 0x00000004L //SMUIO_MP_RESET_INTR #define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR__SHIFT 0x0 #define SMUIO_MP_RESET_INTR__SMUIO_MP_RESET_INTR_MASK 0x00000001L diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h index cf2149cc12ee..90350f46a0c4 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_2_5_offset.h @@ -24,6 +24,18 @@ // addressBlock: uvd0_mmsch_dec // base address: 0x1e000 +#define mmMMSCH_VF_VMID 0x000b +#define mmMMSCH_VF_VMID_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_LO 0x000c +#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0 +#define mmMMSCH_VF_CTX_ADDR_HI 0x000d +#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0 +#define mmMMSCH_VF_CTX_SIZE 0x000e +#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_HOST 0x0012 +#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0 +#define mmMMSCH_VF_MAILBOX_RESP 0x0013 +#define 
mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0 // addressBlock: uvd0_jpegnpdec diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index e88541d67aa0..dd7cbc00a0aa 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -492,12 +492,13 @@ struct atom_firmware_info_v3_1 /* Total 32bit cap indication */ enum atombios_firmware_capability { - ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001, - ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002, - ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040, - ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080, - ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100, - ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200, + ATOM_FIRMWARE_CAP_FIRMWARE_POSTED = 0x00000001, + ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION = 0x00000002, + ATOM_FIRMWARE_CAP_WMI_SUPPORT = 0x00000040, + ATOM_FIRMWARE_CAP_HWEMU_ENABLE = 0x00000080, + ATOM_FIRMWARE_CAP_HWEMU_UMC_CFG = 0x00000100, + ATOM_FIRMWARE_CAP_SRAM_ECC = 0x00000200, + ATOM_FIRMWARE_CAP_ENABLE_2STAGE_BIST_TRAINING = 0x00000400, }; enum atom_cooling_solution_id{ @@ -671,6 +672,20 @@ struct vram_usagebyfirmware_v2_1 uint16_t used_by_driver_in_kb; }; +/* This is part of vram_usagebyfirmware_v2_1 */ +struct vram_reserve_block +{ + uint32_t start_address_in_kb; + uint16_t used_by_firmware_in_kb; + uint16_t used_by_driver_in_kb; +}; + +/* Definitions for constance */ +enum atomfirmware_internal_constants +{ + ONE_KiB = 0x400, + ONE_MiB = 0x100000, +}; /* *************************************************************************** diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h index 5dcb776548d8..7ec4331e67f2 100644 --- a/drivers/gpu/drm/amd/include/discovery.h +++ b/drivers/gpu/drm/amd/include/discovery.h @@ -25,7 +25,6 @@ #define _DISCOVERY_H_ #define PSP_HEADER_SIZE 256 -#define BINARY_MAX_SIZE (64 << 10) #define BINARY_SIGNATURE 0x28211407 #define DISCOVERY_TABLE_SIGNATURE 0x53445049 diff --git a/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h new file mode 100644 index 000000000000..79af4258f259 --- /dev/null +++ b/drivers/gpu/drm/amd/include/ivsrcid/nbio/irqsrcs_nbif_7_4.h @@ -0,0 +1,42 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __IRQSRCS_NBIF_7_4_H__ +#define __IRQSRCS_NBIF_7_4_H__ + +#define NBIF_7_4__SRCID__CHIP_ERR_INT_EVENT 0x5E // Error generated +#define NBIF_7_4__SRCID__DOORBELL_INTERRUPT 0x5F // Interrupt for doorbell event during VDDGFX off +#define NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT 0x60 // Interrupt for ras_intr_valid from RAS controller +#define NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT 0x61 // Interrupt for SDP ErrEvent received from ATHUB +#define NBIF_7_4__SRCID__PF_VF_MSGBUF_VALID 0x87 // Valid message in PF->VF mailbox message buffer (The interrupt is sent on behalf of PF) +#define NBIF_7_4__SRCID__PF_VF_MSGBUF_ACK 0x88 // Acknowledge message in PF->VF mailbox message buffer (The interrupt is sent on behalf of VF) +#define NBIF_7_4__SRCID__VF_PF_MSGBUF_VALID 0x89 // Valid message in VF->PF mailbox message buffer (The interrupt is sent on behalf of VF) +#define NBIF_7_4__SRCID__VF_PF_MSGBUF_ACK 0x8A // Acknowledge message in VF->PF mailbox message buffer (The interrupt is sent on behalf of PF) +#define NBIF_7_4__SRCID__CHIP_DPA_INT_EVENT 0xA0 // BIF_CHIP_DPA_INT_EVENT +#define NBIF_7_4__SRCID__CHIP_SLOT_POWER_CHG_INT_EVENT 0xA1 // BIF_CHIP_SLOT_POWER_CHG_INT_EVENT +#define NBIF_7_4__SRCID__ATOMIC_UR_OPCODE 0xCE // BIF receives unsupported atomic opcode from MC +#define NBIF_7_4__SRCID__ATOMIC_REQESTEREN_LOW 0xCF // BIF receive atomic request from MC while AtomicOp Requester is not enabled in PCIE config space + +#endif // __IRQSRCS_NBIF_7_4_H__ diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 98b9533e672b..2cd217e60125 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -291,15 +291,18 @@ struct kfd2kgd_calls { uint32_t (*address_watch_get_offset)(struct kgd_dev *kgd, unsigned int watch_point_id, unsigned int reg_offset); - bool (*get_atc_vmid_pasid_mapping_valid)( + bool (*get_atc_vmid_pasid_mapping_info)( struct kgd_dev *kgd, - uint8_t vmid); - uint16_t (*get_atc_vmid_pasid_mapping_pasid)( - struct kgd_dev *kgd, - uint8_t vmid); + uint8_t vmid, + uint16_t *p_pasid); + /* No longer needed from GFXv9 onward. The scratch base address is + * passed to the shader by the CP. It's the user mode driver's + * responsibility. 
+ */ void (*set_scratch_backing_va)(struct kgd_dev *kgd, uint64_t va, uint32_t vmid); + int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config); void (*set_vm_context_page_table_base)(struct kgd_dev *kgd, diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 27cf0afaa0b4..a7f92d0b3a90 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -179,6 +179,11 @@ enum pp_mp1_state { PP_MP1_STATE_RESET, }; +enum pp_df_cstate { + DF_CSTATE_DISALLOW = 0, + DF_CSTATE_ALLOW, +}; + #define PP_GROUP_MASK 0xF0000000 #define PP_GROUP_SHIFT 28 @@ -215,6 +220,9 @@ enum pp_mp1_state { ((group) << PP_GROUP_SHIFT | (block) << PP_BLOCK_SHIFT | \ (support) << PP_STATE_SUPPORT_SHIFT | (state) << PP_STATE_SHIFT) +#define XGMI_MODE_PSTATE_D3 0 +#define XGMI_MODE_PSTATE_D0 1 + struct seq_file; enum amd_pp_clock_type; struct amd_pp_simple_clock_info; @@ -312,6 +320,8 @@ struct amd_pm_funcs { int (*get_ppfeature_status)(void *handle, char *buf); int (*set_ppfeature_status)(void *handle, uint64_t ppfeature_masks); int (*asic_reset_mode_2)(void *handle); + int (*set_df_cstate)(void *handle, enum pp_df_cstate state); + int (*set_xgmi_pstate)(void *handle, uint32_t pstate); }; #endif diff --git a/drivers/gpu/drm/amd/include/renoir_ip_offset.h b/drivers/gpu/drm/amd/include/renoir_ip_offset.h index 094648cac392..07633e22e99a 100644 --- a/drivers/gpu/drm/amd/include/renoir_ip_offset.h +++ b/drivers/gpu/drm/amd/include/renoir_ip_offset.h @@ -169,6 +169,11 @@ static const struct IP_BASE NBIF0_BASE ={ { { { 0x00000000, 0x00000014, 0x00000D { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } } } }; +static const struct IP_BASE DCN_BASE ={ { { { 0x00000012, 0x000000C0, 0x000034C0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } }, + { { 0, 0, 0, 0, 0 } } } }; static const struct IP_BASE OSSSYS_BASE ={ { { { 0x000010A0, 0x0240A000, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } }, { { 0, 0, 0, 0, 0 } }, @@ -1361,4 +1366,33 @@ static const struct IP_BASE UVD0_BASE ={ { { { 0x00007800, 0x00007E00, 0x0240300 #define UVD0_BASE__INST6_SEG3 0 #define UVD0_BASE__INST6_SEG4 0 +#define DCN_BASE__INST0_SEG0 0x00000012 +#define DCN_BASE__INST0_SEG1 0x000000C0 +#define DCN_BASE__INST0_SEG2 0x000034C0 +#define DCN_BASE__INST0_SEG3 0 +#define DCN_BASE__INST0_SEG4 0 + +#define DCN_BASE__INST1_SEG0 0 +#define DCN_BASE__INST1_SEG1 0 +#define DCN_BASE__INST1_SEG2 0 +#define DCN_BASE__INST1_SEG3 0 +#define DCN_BASE__INST1_SEG4 0 + +#define DCN_BASE__INST2_SEG0 0 +#define DCN_BASE__INST2_SEG1 0 +#define DCN_BASE__INST2_SEG2 0 +#define DCN_BASE__INST2_SEG3 0 +#define DCN_BASE__INST2_SEG4 0 + +#define DCN_BASE__INST3_SEG0 0 +#define DCN_BASE__INST3_SEG1 0 +#define DCN_BASE__INST3_SEG2 0 +#define DCN_BASE__INST3_SEG3 0 +#define DCN_BASE__INST3_SEG4 0 + +#define DCN_BASE__INST4_SEG0 0 +#define DCN_BASE__INST4_SEG1 0 +#define DCN_BASE__INST4_SEG2 0 +#define DCN_BASE__INST4_SEG3 0 +#define DCN_BASE__INST4_SEG4 0 #endif diff --git a/drivers/gpu/drm/amd/include/vega10_enum.h b/drivers/gpu/drm/amd/include/vega10_enum.h index c14ba65a2415..adf1b754666e 100644 --- a/drivers/gpu/drm/amd/include/vega10_enum.h +++ b/drivers/gpu/drm/amd/include/vega10_enum.h @@ -1037,6 +1037,7 @@ TCC_CACHE_POLICY_STREAM = 0x00000001, typedef enum MTYPE { MTYPE_NC = 0x00000000, MTYPE_WC = 0x00000001, +MTYPE_RW = 0x00000001, MTYPE_CC = 0x00000002, MTYPE_UC = 0x00000003, } MTYPE; diff --git 
a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index fa8ad7db2b3a..7932eb163a00 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -969,6 +969,14 @@ static int pp_dpm_switch_power_profile(void *handle, workload = hwmgr->workload_setting[index]; } + if (type == PP_SMC_POWER_PROFILE_COMPUTE && + hwmgr->hwmgr_func->disable_power_features_for_compute_performance) { + if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) { + mutex_unlock(&hwmgr->smu_lock); + return -EINVAL; + } + } + if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); mutex_unlock(&hwmgr->smu_lock); @@ -1421,6 +1429,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap) { struct pp_hwmgr *hwmgr = handle; + *cap = false; if (!hwmgr) return -EINVAL; @@ -1548,6 +1557,40 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire) return ret; } +static int pp_set_df_cstate(void *handle, enum pp_df_cstate state) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr) + return -EINVAL; + + if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate) + return 0; + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->set_df_cstate(hwmgr, state); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + +static int pp_set_xgmi_pstate(void *handle, uint32_t pstate) +{ + struct pp_hwmgr *hwmgr = handle; + + if (!hwmgr) + return -EINVAL; + + if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate) + return 0; + + mutex_lock(&hwmgr->smu_lock); + hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate); + mutex_unlock(&hwmgr->smu_lock); + + return 0; +} + static const struct amd_pm_funcs pp_dpm_funcs = { .load_firmware = pp_dpm_load_fw, .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, @@ -1606,4 +1649,6 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .set_ppfeature_status = pp_set_ppfeature_status, .asic_reset_mode_2 = pp_asic_reset_mode_2, .smu_i2c_bus_access = pp_smu_i2c_bus_access, + .set_df_cstate = pp_set_df_cstate, + .set_xgmi_pstate = pp_set_xgmi_pstate, }; diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 4acf139ea014..40b546c75fc2 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -25,11 +25,16 @@ #include "pp_debug.h" #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "soc15_common.h" #include "smu_v11_0.h" #include "smu_v12_0.h" #include "atom.h" #include "amd_pcie.h" +#include "vega20_ppt.h" +#include "arcturus_ppt.h" +#include "navi10_ppt.h" +#include "renoir_ppt.h" #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) #type @@ -67,6 +72,8 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) uint32_t sort_feature[SMU_FEATURE_COUNT]; uint64_t hw_feature_count = 0; + mutex_lock(&smu->mutex); + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); if (ret) goto failed; @@ -92,9 +99,57 @@ size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf) } failed: + mutex_unlock(&smu->mutex); + return size; } +static int smu_feature_update_enable_state(struct smu_context *smu, + uint64_t feature_mask, + bool enabled) +{ + struct smu_feature *feature = &smu->smu_feature; + uint32_t feature_low = 0, feature_high = 0; + int ret = 0; + + if (!smu->pm_enabled) + return ret; + + feature_low = (feature_mask >> 0 ) & 0xffffffff; + feature_high = 
(feature_mask >> 32) & 0xffffffff; + + if (enabled) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, + feature_low); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, + feature_high); + if (ret) + return ret; + } else { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, + feature_low); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, + feature_high); + if (ret) + return ret; + } + + mutex_lock(&feature->mutex); + if (enabled) + bitmap_or(feature->enabled, feature->enabled, + (unsigned long *)(&feature_mask), SMU_FEATURE_MAX); + else + bitmap_andnot(feature->enabled, feature->enabled, + (unsigned long *)(&feature_mask), SMU_FEATURE_MAX); + mutex_unlock(&feature->mutex); + + return ret; +} + int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) { int ret = 0; @@ -103,9 +158,11 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) uint64_t feature_2_disabled = 0; uint64_t feature_enables = 0; + mutex_lock(&smu->mutex); + ret = smu_feature_get_enabled_mask(smu, feature_mask, 2); if (ret) - return ret; + goto out; feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]); @@ -115,14 +172,17 @@ int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask) if (feature_2_enabled) { ret = smu_feature_update_enable_state(smu, feature_2_enabled, true); if (ret) - return ret; + goto out; } if (feature_2_disabled) { ret = smu_feature_update_enable_state(smu, feature_2_disabled, false); if (ret) - return ret; + goto out; } +out: + mutex_unlock(&smu->mutex); + return ret; } @@ -159,8 +219,7 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max) { - int ret = 0, clk_id = 0; - uint32_t param; + int ret = 0; if (min <= 0 && max <= 0) return -EINVAL; @@ -168,27 +227,7 @@ int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, if (!smu_clk_dpm_is_enabled(smu, clk_type)) return 0; - clk_id = smu_clk_get_index(smu, clk_type); - if (clk_id < 0) - return clk_id; - - if (max > 0) { - param = (uint32_t)((clk_id << 16) | (max & 0xffff)); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, - param); - if (ret) - return ret; - } - - if (min > 0) { - param = (uint32_t)((clk_id << 16) | (min & 0xffff)); - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, - param); - if (ret) - return ret; - } - - + ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max); return ret; } @@ -229,7 +268,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, } int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t *min, uint32_t *max) + uint32_t *min, uint32_t *max, bool lock_needed) { uint32_t clock_limit; int ret = 0; @@ -237,6 +276,9 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, if (!min && !max) return -EINVAL; + if (lock_needed) + mutex_lock(&smu->mutex); + if (!smu_clk_dpm_is_enabled(smu, clk_type)) { switch (clk_type) { case SMU_MCLK: @@ -260,14 +302,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, *min = clock_limit / 100; if (max) *max = clock_limit / 100; - - return 0; + } else { + /* + * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the + * core driver 
and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs). + */ + ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max); } - /* - * Todo: Use each asic(ASIC_ppt funcs) control the callbacks exposed to the - * core driver and then have helpers for stuff that is common(SMU_v11_x | SMU_v12_x funcs). - */ - ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max); + + if (lock_needed) + mutex_unlock(&smu->mutex); + return ret; } @@ -338,7 +383,20 @@ bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type) return true; } - +/** + * smu_dpm_set_power_gate - power gate/ungate the specific IP block + * + * @smu: smu_context pointer + * @block_type: the IP block to power gate/ungate + * @gate: to power gate if true, ungate otherwise + * + * This API uses no smu->mutex lock protection due to: + * 1. It is either called by other IP block(gfx/sdma/vcn/uvd/vce). + * This is guarded to be race condition free by the caller. + * 2. Or get called on user setting request of power_dpm_force_performance_level. + * Under this case, the smu->mutex lock protection is already enforced on + * the parent API smu_force_performance_level of the call path. + */ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, bool gate) { @@ -364,12 +422,6 @@ int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type, return ret; } -enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) -{ - /* not support power state */ - return POWER_STATE_TYPE_DEFAULT; -} - int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info) { @@ -439,7 +491,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int int ret = 0; int table_id = smu_table_get_index(smu, table_index); - if (!table_data || table_id >= smu_table->table_count || table_id < 0) + if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0) return -EINVAL; table = &smu_table->tables[table_index]; @@ -463,7 +515,7 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int return ret; /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); if (!drv2smu) memcpy(table_data, table->cpu_addr, table->size); @@ -483,7 +535,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev) bool is_support_sw_smu_xgmi(struct amdgpu_device *adev) { - if (amdgpu_dpm != 1) + if (!is_support_sw_smu(adev)) return false; if (adev->asic_type == CHIP_VEGA20) @@ -495,16 +547,23 @@ bool is_support_sw_smu_xgmi(struct amdgpu_device *adev) int smu_sys_get_pp_table(struct smu_context *smu, void **table) { struct smu_table_context *smu_table = &smu->smu_table; + uint32_t powerplay_table_size; if (!smu_table->power_play_table && !smu_table->hardcode_pptable) return -EINVAL; + mutex_lock(&smu->mutex); + if (smu_table->hardcode_pptable) *table = smu_table->hardcode_pptable; else *table = smu_table->power_play_table; - return smu_table->power_play_table_size; + powerplay_table_size = smu_table->power_play_table_size; + + mutex_unlock(&smu->mutex); + + return powerplay_table_size; } int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) @@ -531,13 +590,18 @@ int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size) memcpy(smu_table->hardcode_pptable, buf, size); smu_table->power_play_table = smu_table->hardcode_pptable; smu_table->power_play_table_size = size; - mutex_unlock(&smu->mutex); + + /* + * Special hw_fini action(for Navi1x, the DPMs disablement 
will be + * skipped) may be needed for custom pptable uploading. + */ + smu->uploading_custom_pp_table = true; ret = smu_reset(smu); if (ret) pr_info("smu reset failed, ret = %d\n", ret); - return ret; + smu->uploading_custom_pp_table = false; failed: mutex_unlock(&smu->mutex); @@ -569,41 +633,7 @@ int smu_feature_init_dpm(struct smu_context *smu) return ret; } -int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled) -{ - uint32_t feature_low = 0, feature_high = 0; - int ret = 0; - - if (!smu->pm_enabled) - return ret; - feature_low = (feature_mask >> 0 ) & 0xffffffff; - feature_high = (feature_mask >> 32) & 0xffffffff; - - if (enabled) { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow, - feature_low); - if (ret) - return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh, - feature_high); - if (ret) - return ret; - - } else { - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow, - feature_low); - if (ret) - return ret; - ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh, - feature_high); - if (ret) - return ret; - - } - - return ret; -} int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask) { @@ -633,8 +663,6 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask, { struct smu_feature *feature = &smu->smu_feature; int feature_id; - uint64_t feature_mask = 0; - int ret = 0; feature_id = smu_feature_get_index(smu, mask); if (feature_id < 0) @@ -642,22 +670,9 @@ int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask, WARN_ON(feature_id > feature->feature_num); - feature_mask = 1ULL << feature_id; - - mutex_lock(&feature->mutex); - ret = smu_feature_update_enable_state(smu, feature_mask, enable); - if (ret) - goto failed; - - if (enable) - test_and_set_bit(feature_id, feature->enabled); - else - test_and_clear_bit(feature_id, feature->enabled); - -failed: - mutex_unlock(&feature->mutex); - - return ret; + return smu_feature_update_enable_state(smu, + 1ULL << feature_id, + enable); } int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask) @@ -707,20 +722,27 @@ static int smu_set_funcs(struct amdgpu_device *adev) { struct smu_context *smu = &adev->smu; + if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) + smu->od_enabled = true; + switch (adev->asic_type) { case CHIP_VEGA20: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + vega20_set_ppt_funcs(smu); + break; case CHIP_NAVI10: case CHIP_NAVI14: case CHIP_NAVI12: + navi10_set_ppt_funcs(smu); + break; case CHIP_ARCTURUS: - if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) - smu->od_enabled = true; - smu_v11_0_set_smu_funcs(smu); + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + arcturus_set_ppt_funcs(smu); + /* OD is not supported on Arcturus */ + smu->od_enabled =false; break; case CHIP_RENOIR: - if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) - smu->od_enabled = true; - smu_v12_0_set_smu_funcs(smu); + renoir_set_ppt_funcs(smu); break; default: return -EINVAL; @@ -736,6 +758,7 @@ static int smu_early_init(void *handle) smu->adev = adev; smu->pm_enabled = !!amdgpu_dpm; + smu->is_apu = false; mutex_init(&smu->mutex); return smu_set_funcs(adev); @@ -749,11 +772,10 @@ static int smu_late_init(void *handle) if (!smu->pm_enabled) return 0; - mutex_lock(&smu->mutex); smu_handle_task(&adev->smu, smu->smu_dpm.dpm_level, - AMD_PP_TASK_COMPLETE_INIT); - mutex_unlock(&smu->mutex); + AMD_PP_TASK_COMPLETE_INIT, + false); return 0; } @@ -919,14 +941,9 @@ 
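For illustration, a minimal sketch (not part of the patch itself) of the locking convention this series applies to the smu_* wrappers: callers that already hold smu->mutex pass lock_needed = false, all other callers pass true. The function name smu_do_foo and callback do_foo below are hypothetical; the real wrappers in this diff (smu_force_clk_levels, smu_get_power_limit, smu_handle_task, smu_set_power_profile_mode) follow the same shape.

/* hypothetical wrapper, shown only to illustrate the lock_needed pattern */
static int smu_do_foo(struct smu_context *smu, bool lock_needed)
{
	int ret = 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->do_foo)
		ret = smu->ppt_funcs->do_foo(smu);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}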
static int smu_init_fb_allocations(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; - uint32_t table_count = smu_table->table_count; - uint32_t i = 0; - int32_t ret = 0; + int ret, i; - if (table_count <= 0) - return -EINVAL; - - for (i = 0 ; i < table_count; i++) { + for (i = 0; i < SMU_TABLE_COUNT; i++) { if (tables[i].size == 0) continue; ret = amdgpu_bo_create_kernel(adev, @@ -942,7 +959,7 @@ static int smu_init_fb_allocations(struct smu_context *smu) return 0; failed: - for (; i > 0; i--) { + while (--i >= 0) { if (tables[i].size == 0) continue; amdgpu_bo_free_kernel(&tables[i].bo, @@ -957,13 +974,12 @@ static int smu_fini_fb_allocations(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = smu_table->tables; - uint32_t table_count = smu_table->table_count; uint32_t i = 0; - if (table_count == 0 || tables == NULL) + if (!tables) return 0; - for (i = 0 ; i < table_count; i++) { + for (i = 0; i < SMU_TABLE_COUNT; i++) { if (tables[i].size == 0) continue; amdgpu_bo_free_kernel(&tables[i].bo, @@ -974,50 +990,6 @@ static int smu_fini_fb_allocations(struct smu_context *smu) return 0; } -static int smu_override_pcie_parameters(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg; - int ret; - - if (adev->flags & AMD_IS_APU) - return 0; - - if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) - pcie_gen = 3; - else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) - pcie_gen = 2; - else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) - pcie_gen = 1; - else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) - pcie_gen = 0; - - /* Bit 31:16: LCLK DPM level. 
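For illustration only (not part of the removed code): a worked packing of the 32-bit argument sent with SMU_MSG_OverridePcieParameters, following the bit layout spelled out in the surrounding comment (bits 31:16 LCLK DPM level, bits 15:8 PCIe gen where 0-3 means GEN1-GEN4, bits 7:0 lane width where 6 means x16).

/* sketch: request LCLK DPM1 on a Gen4-capable, x16-wide link */
uint32_t pcie_gen = 3;		/* GEN4 */
uint32_t pcie_width = 6;	/* x16 */
uint32_t smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;	/* == 0x00010306 */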
0 is DPM0, and 1 is DPM1 - * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 - * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 - */ - if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) - pcie_width = 6; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) - pcie_width = 5; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) - pcie_width = 4; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) - pcie_width = 3; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) - pcie_width = 2; - else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) - pcie_width = 1; - - smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width; - ret = smu_send_smc_msg_with_param(smu, - SMU_MSG_OverridePcieParameters, - smu_pcie_arg); - if (ret) - pr_err("[%s] Attempt to override pcie params failed!\n", __func__); - return ret; -} - static int smu_smc_table_hw_init(struct smu_context *smu, bool initialize) { @@ -1092,8 +1064,8 @@ static int smu_smc_table_hw_init(struct smu_context *smu, if (ret) return ret; - /* issue RunAfllBtc msg */ - ret = smu_run_afll_btc(smu); + /* issue Run*Btc msg */ + ret = smu_run_btc(smu); if (ret) return ret; @@ -1106,10 +1078,6 @@ static int smu_smc_table_hw_init(struct smu_context *smu, return ret; if (adev->asic_type != CHIP_ARCTURUS) { - ret = smu_override_pcie_parameters(smu); - if (ret) - return ret; - ret = smu_notify_display_change(smu); if (ret) return ret; @@ -1138,6 +1106,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu, return ret; } + if (adev->asic_type != CHIP_ARCTURUS) { + ret = smu_override_pcie_parameters(smu); + if (ret) + return ret; + } + ret = smu_set_default_od_settings(smu, initialize); if (ret) return ret; @@ -1147,7 +1121,7 @@ static int smu_smc_table_hw_init(struct smu_context *smu, if (ret) return ret; - ret = smu_get_power_limit(smu, &smu->default_power_limit, true); + ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false); if (ret) return ret; } @@ -1226,29 +1200,46 @@ static int smu_free_memory_pool(struct smu_context *smu) return ret; } -static int smu_hw_init(void *handle) +static int smu_start_smc_engine(struct smu_context *smu) { - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct smu_context *smu = &adev->smu; + struct amdgpu_device *adev = smu->adev; + int ret = 0; if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { if (adev->asic_type < CHIP_NAVI10) { - ret = smu_load_microcode(smu); - if (ret) - return ret; + if (smu->ppt_funcs->load_microcode) { + ret = smu->ppt_funcs->load_microcode(smu); + if (ret) + return ret; + } } } - ret = smu_check_fw_status(smu); + if (smu->ppt_funcs->check_fw_status) { + ret = smu->ppt_funcs->check_fw_status(smu); + if (ret) + pr_err("SMC is not ready\n"); + } + + return ret; +} + +static int smu_hw_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct smu_context *smu = &adev->smu; + + ret = smu_start_smc_engine(smu); if (ret) { - pr_err("SMC firmware status is not correct\n"); + pr_err("SMU is not ready yet!\n"); return ret; } if (adev->flags & AMD_IS_APU) { smu_powergate_sdma(&adev->smu, false); smu_powergate_vcn(&adev->smu, false); + smu_set_gfx_cgpg(&adev->smu, true); } if (!smu->pm_enabled) @@ -1291,6 +1282,11 @@ failed: return ret; } +static int smu_stop_dpms(struct smu_context *smu) +{ + return smu_send_smc_msg(smu, SMU_MSG_DisableAllSmuFeatures); +} + static int smu_hw_fini(void *handle) { struct 
amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1303,6 +1299,33 @@ static int smu_hw_fini(void *handle) smu_powergate_vcn(&adev->smu, true); } + ret = smu_stop_thermal_control(smu); + if (ret) { + pr_warn("Fail to stop thermal control!\n"); + return ret; + } + + /* + * For custom pptable uploading, skip the DPM features + * disable process on Navi1x ASICs. + * - As the gfx related features are under control of + * RLC on those ASICs. RLC reinitialization will be + * needed to reenable them. That will cost much more + * efforts. + * + * - SMU firmware can handle the DPM reenablement + * properly. + */ + if (!smu->uploading_custom_pp_table || + !((adev->asic_type >= CHIP_NAVI10) && + (adev->asic_type <= CHIP_NAVI12))) { + ret = smu_stop_dpms(smu); + if (ret) { + pr_warn("Fail to stop Dpms!\n"); + return ret; + } + } + kfree(table_context->driver_pptable); table_context->driver_pptable = NULL; @@ -1344,7 +1367,10 @@ static int smu_suspend(void *handle) int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct smu_context *smu = &adev->smu; - bool baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); + bool baco_feature_is_enabled = false; + + if(!(adev->flags & AMD_IS_APU)) + baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT); ret = smu_system_features_control(smu, false); if (ret) @@ -1363,6 +1389,8 @@ static int smu_suspend(void *handle) if (adev->asic_type >= CHIP_NAVI10 && adev->gfx.rlc.funcs->stop) adev->gfx.rlc.funcs->stop(adev); + if (smu->is_apu) + smu_set_gfx_cgpg(&adev->smu, false); return 0; } @@ -1375,7 +1403,11 @@ static int smu_resume(void *handle) pr_info("SMU is resuming...\n"); - mutex_lock(&smu->mutex); + ret = smu_start_smc_engine(smu); + if (ret) { + pr_err("SMU is not ready yet!\n"); + goto failed; + } ret = smu_smc_table_hw_init(smu, false); if (ret) @@ -1385,13 +1417,16 @@ static int smu_resume(void *handle) if (ret) goto failed; - mutex_unlock(&smu->mutex); + if (smu->is_apu) + smu_set_gfx_cgpg(&adev->smu, true); + + smu->disable_uclk_switch = 0; pr_info("SMU is resumed successfully!\n"); return 0; + failed: - mutex_unlock(&smu->mutex); return ret; } @@ -1409,8 +1444,9 @@ int smu_display_configuration_change(struct smu_context *smu, mutex_lock(&smu->mutex); - smu_set_deep_sleep_dcefclk(smu, - display_config->min_dcef_deep_sleep_set_clk / 100); + if (smu->ppt_funcs->set_deep_sleep_dcefclk) + smu->ppt_funcs->set_deep_sleep_dcefclk(smu, + display_config->min_dcef_deep_sleep_set_clk / 100); for (index = 0; index < display_config->num_path_including_non_display; index++) { if (display_config->displays[index].controller_id != 0) @@ -1529,7 +1565,8 @@ static int smu_enable_umd_pstate(void *handle, struct smu_context *smu = (struct smu_context*)(handle); struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - if (!smu->pm_enabled || !smu_dpm_ctx->dpm_context) + + if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context)) return -EINVAL; if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) { @@ -1587,9 +1624,9 @@ static int smu_default_set_performance_level(struct smu_context *smu, enum amd_d &soc_mask); if (ret) return ret; - smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask); - smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask); - smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask); + smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false); + smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false); + smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false); break; case 
AMD_DPM_FORCED_LEVEL_MANUAL: case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: @@ -1653,7 +1690,7 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, workload = smu->workload_setting[index]; if (smu->power_profile_mode != workload) - smu_set_power_profile_mode(smu, &workload, 0); + smu_set_power_profile_mode(smu, &workload, 0, false); } return ret; @@ -1661,18 +1698,22 @@ int smu_adjust_power_state_dynamic(struct smu_context *smu, int smu_handle_task(struct smu_context *smu, enum amd_dpm_forced_level level, - enum amd_pp_task task_id) + enum amd_pp_task task_id, + bool lock_needed) { int ret = 0; + if (lock_needed) + mutex_lock(&smu->mutex); + switch (task_id) { case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: ret = smu_pre_display_config_changed(smu); if (ret) - return ret; + goto out; ret = smu_set_cpu_power_state(smu); if (ret) - return ret; + goto out; ret = smu_adjust_power_state_dynamic(smu, level, false); break; case AMD_PP_TASK_COMPLETE_INIT: @@ -1683,6 +1724,10 @@ int smu_handle_task(struct smu_context *smu, break; } +out: + if (lock_needed) + mutex_unlock(&smu->mutex); + return ret; } @@ -1715,7 +1760,7 @@ int smu_switch_power_profile(struct smu_context *smu, } if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) - smu_set_power_profile_mode(smu, &workload, 0); + smu_set_power_profile_mode(smu, &workload, 0, false); mutex_unlock(&smu->mutex); @@ -1727,7 +1772,7 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu) struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); enum amd_dpm_forced_level level; - if (!smu_dpm_ctx->dpm_context) + if (!smu->is_apu && !smu_dpm_ctx->dpm_context) return -EINVAL; mutex_lock(&(smu->mutex)); @@ -1742,15 +1787,22 @@ int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_lev struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); int ret = 0; - if (!smu_dpm_ctx->dpm_context) + if (!smu->is_apu && !smu_dpm_ctx->dpm_context) return -EINVAL; + mutex_lock(&smu->mutex); + ret = smu_enable_umd_pstate(smu, &level); - if (ret) + if (ret) { + mutex_unlock(&smu->mutex); return ret; + } ret = smu_handle_task(smu, level, - AMD_PP_TASK_READJUST_POWER_STATE); + AMD_PP_TASK_READJUST_POWER_STATE, + false); + + mutex_unlock(&smu->mutex); return ret; } @@ -1766,6 +1818,144 @@ int smu_set_display_count(struct smu_context *smu, uint32_t count) return ret; } +int smu_force_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t mask, + bool lock_needed) +{ + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + int ret = 0; + + if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { + pr_debug("force clock level is for dpm manual mode only.\n"); + return -EINVAL; + } + + if (lock_needed) + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) + ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); + + if (lock_needed) + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_mp1_state(struct smu_context *smu, + enum pp_mp1_state mp1_state) +{ + uint16_t msg; + int ret; + + /* + * The SMC is not fully ready. That may be + * expected as the IP may be masked. + * So, just return without error. 
+ */ + if (!smu->pm_enabled) + return 0; + + mutex_lock(&smu->mutex); + + switch (mp1_state) { + case PP_MP1_STATE_SHUTDOWN: + msg = SMU_MSG_PrepareMp1ForShutdown; + break; + case PP_MP1_STATE_UNLOAD: + msg = SMU_MSG_PrepareMp1ForUnload; + break; + case PP_MP1_STATE_RESET: + msg = SMU_MSG_PrepareMp1ForReset; + break; + case PP_MP1_STATE_NONE: + default: + mutex_unlock(&smu->mutex); + return 0; + } + + /* some asics may not support those messages */ + if (smu_msg_get_index(smu, msg) < 0) { + mutex_unlock(&smu->mutex); + return 0; + } + + ret = smu_send_smc_msg(smu, msg); + if (ret) + pr_err("[PrepareMp1] Failed!\n"); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + int ret = 0; + + /* + * The SMC is not fully ready. That may be + * expected as the IP may be masked. + * So, just return without error. + */ + if (!smu->pm_enabled) + return 0; + + if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) + return 0; + + mutex_lock(&smu->mutex); + + ret = smu->ppt_funcs->set_df_cstate(smu, state); + if (ret) + pr_err("[SetDfCstate] failed!\n"); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_write_watermarks_table(struct smu_context *smu) +{ + int ret = 0; + struct smu_table_context *smu_table = &smu->smu_table; + struct smu_table *table = NULL; + + table = &smu_table->tables[SMU_TABLE_WATERMARKS]; + + if (!table->cpu_addr) + return -EINVAL; + + ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr, + true); + + return ret; +} + +int smu_set_watermarks_for_clock_ranges(struct smu_context *smu, + struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges) +{ + int ret = 0; + struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS]; + void *table = watermarks->cpu_addr; + + mutex_lock(&smu->mutex); + + if (!smu->disable_watermark && + smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && + smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { + smu_set_watermarks_table(smu, table, clock_ranges); + smu->watermarks_bitmap |= WATERMARKS_EXIST; + smu->watermarks_bitmap &= ~WATERMARKS_LOADED; + } + + mutex_unlock(&smu->mutex); + + return ret; +} + const struct amd_ip_funcs smu_ip_funcs = { .name = "smu", .early_init = smu_early_init, @@ -1802,3 +1992,559 @@ const struct amdgpu_ip_block_version smu_v12_0_ip_block = .rev = 0, .funcs = &smu_ip_funcs, }; + +int smu_load_microcode(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->load_microcode) + ret = smu->ppt_funcs->load_microcode(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_check_fw_status(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->check_fw_status) + ret = smu->ppt_funcs->check_fw_status(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_gfx_cgpg) + ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_fan_speed_rpm) + ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_power_limit(struct smu_context *smu, + uint32_t *limit, + bool def, + bool lock_needed) +{ + int ret = 0; + + if (lock_needed) + 
mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_power_limit) + ret = smu->ppt_funcs->get_power_limit(smu, limit, def); + + if (lock_needed) + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_power_limit(struct smu_context *smu, uint32_t limit) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_power_limit) + ret = smu->ppt_funcs->set_power_limit(smu, limit); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->print_clk_levels) + ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_od_percentage) + ret = smu->ppt_funcs->get_od_percentage(smu, type); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_od_percentage) + ret = smu->ppt_funcs->set_od_percentage(smu, type, value); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->od_edit_dpm_table) + ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->read_sensor) + ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_power_profile_mode(struct smu_context *smu, char *buf) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_power_profile_mode) + ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_power_profile_mode(struct smu_context *smu, + long *param, + uint32_t param_size, + bool lock_needed) +{ + int ret = 0; + + if (lock_needed) + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_power_profile_mode) + ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size); + + if (lock_needed) + mutex_unlock(&smu->mutex); + + return ret; +} + + +int smu_get_fan_control_mode(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_fan_control_mode) + ret = smu->ppt_funcs->get_fan_control_mode(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_fan_control_mode(struct smu_context *smu, int value) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_fan_control_mode) + ret = smu->ppt_funcs->set_fan_control_mode(smu, value); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_fan_speed_percent) + ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if 
(smu->ppt_funcs->set_fan_speed_percent) + ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_fan_speed_rpm) + ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_deep_sleep_dcefclk) + ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_active_display_count(struct smu_context *smu, uint32_t count) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_active_display_count) + ret = smu->ppt_funcs->set_active_display_count(smu, count); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_clock_by_type(struct smu_context *smu, + enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_clock_by_type) + ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_max_high_clocks(struct smu_context *smu, + struct amd_pp_simple_clock_info *clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_max_high_clocks) + ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_clock_by_type_with_latency(struct smu_context *smu, + enum smu_clk_type clk_type, + struct pp_clock_levels_with_latency *clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_clock_by_type_with_latency) + ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_clock_by_type_with_voltage(struct smu_context *smu, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_clock_by_type_with_voltage) + ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + + +int smu_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request *clock_req) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->display_clock_voltage_request) + ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); + + mutex_unlock(&smu->mutex); + + return ret; +} + + +int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch) +{ + int ret = -EINVAL; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->display_disable_memory_clock_switch) + ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_notify_smu_enable_pwe(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->notify_smu_enable_pwe) + ret = smu->ppt_funcs->notify_smu_enable_pwe(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_xgmi_pstate) + ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); + + mutex_unlock(&smu->mutex); + + return 
ret; +} + +int smu_set_azalia_d3_pme(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->set_azalia_d3_pme) + ret = smu->ppt_funcs->set_azalia_d3_pme(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +bool smu_baco_is_support(struct smu_context *smu) +{ + bool ret = false; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->baco_is_support) + ret = smu->ppt_funcs->baco_is_support(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state) +{ + if (smu->ppt_funcs->baco_get_state) + return -EINVAL; + + mutex_lock(&smu->mutex); + *state = smu->ppt_funcs->baco_get_state(smu); + mutex_unlock(&smu->mutex); + + return 0; +} + +int smu_baco_reset(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->baco_reset) + ret = smu->ppt_funcs->baco_reset(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_mode2_reset(struct smu_context *smu) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->mode2_reset) + ret = smu->ppt_funcs->mode2_reset(smu); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) + ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); + + mutex_unlock(&smu->mutex); + + return ret; +} + +int smu_get_uclk_dpm_states(struct smu_context *smu, + unsigned int *clock_values_in_khz, + unsigned int *num_states) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_uclk_dpm_states) + ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); + + mutex_unlock(&smu->mutex); + + return ret; +} + +enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu) +{ + enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_current_power_state) + pm_state = smu->ppt_funcs->get_current_power_state(smu); + + mutex_unlock(&smu->mutex); + + return pm_state; +} + +int smu_get_dpm_clock_table(struct smu_context *smu, + struct dpm_clocks *clock_table) +{ + int ret = 0; + + mutex_lock(&smu->mutex); + + if (smu->ppt_funcs->get_dpm_clock_table) + ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); + + mutex_unlock(&smu->mutex); + + return ret; +} + +uint32_t smu_get_pptable_power_limit(struct smu_context *smu) +{ + uint32_t ret = 0; + + if (smu->ppt_funcs->get_pptable_power_limit) + ret = smu->ppt_funcs->get_pptable_power_limit(smu); + + return ret; +} diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c index d493a3f8c07a..58c7c4a3053e 100644 --- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c @@ -25,6 +25,7 @@ #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" @@ -36,6 +37,12 @@ #include "smu_v11_0_pptable.h" #include "arcturus_ppsmc.h" #include "nbio/nbio_7_4_sh_mask.h" +#include "amdgpu_xgmi.h" +#include <linux/i2c.h> +#include <linux/pci.h> +#include "amdgpu_ras.h" + +#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev #define CTF_OFFSET_EDGE 5 #define CTF_OFFSET_HOTSPOT 5 @@ 
-112,8 +119,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(PrepareMp1ForShutdown, PPSMC_MSG_PrepareMp1ForShutdown), MSG_MAP(SoftReset, PPSMC_MSG_SoftReset), MSG_MAP(RunAfllBtc, PPSMC_MSG_RunAfllBtc), - MSG_MAP(RunGfxDcBtc, PPSMC_MSG_RunGfxDcBtc), - MSG_MAP(RunSocDcBtc, PPSMC_MSG_RunSocDcBtc), + MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc), MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh), MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow), MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize), @@ -172,6 +178,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_table_map[SMU_TABLE_COUNT] = { TAB_MAP(SMU_METRICS), TAB_MAP(DRIVER_SMU_CONFIG), TAB_MAP(OVERDRIVE), + TAB_MAP(I2C_COMMANDS), }; static struct smu_11_0_cmn2aisc_mapping arcturus_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -294,6 +301,9 @@ static int arcturus_tables_init(struct smu_context *smu, struct smu_table *table SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_I2C_COMMANDS, sizeof(SwI2cRequest_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL); if (!smu_table->metrics_table) return -ENOMEM; @@ -528,9 +538,17 @@ static int arcturus_append_powerplay_table(struct smu_context *smu) return 0; } -static int arcturus_run_btc_afll(struct smu_context *smu) +static int arcturus_run_btc(struct smu_context *smu) { - return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); + int ret = 0; + + ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc); + if (ret) { + pr_err("RunAfllBtc failed!\n"); + return ret; + } + + return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc); } static int arcturus_populate_umd_state_clk(struct smu_context *smu) @@ -610,12 +628,17 @@ static int arcturus_print_clk_levels(struct smu_context *smu, return ret; } + /* + * For DPM disabled case, there will be only one clock level. + * And it's safe to assume that is always the current clock. + */ for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, - arcturus_freqs_in_same_level( + (clocks.num_levels == 1) ? "*" : + (arcturus_freqs_in_same_level( clocks.data[i].clocks_in_khz / 1000, - now / 100) ? "*" : ""); + now / 100) ? "*" : "")); break; case SMU_MCLK: @@ -635,9 +658,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, - arcturus_freqs_in_same_level( + (clocks.num_levels == 1) ? "*" : + (arcturus_freqs_in_same_level( clocks.data[i].clocks_in_khz / 1000, - now / 100) ? "*" : ""); + now / 100) ? "*" : "")); break; case SMU_SOCCLK: @@ -657,9 +681,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu, for (i = 0; i < clocks.num_levels; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, clocks.data[i].clocks_in_khz / 1000, - arcturus_freqs_in_same_level( + (clocks.num_levels == 1) ? "*" : + (arcturus_freqs_in_same_level( clocks.data[i].clocks_in_khz / 1000, - now / 100) ? "*" : ""); + now / 100) ? "*" : "")); break; case SMU_FCLK: @@ -679,9 +704,10 @@ static int arcturus_print_clk_levels(struct smu_context *smu, for (i = 0; i < single_dpm_table->count; i++) size += sprintf(buf + size, "%d: %uMhz %s\n", i, single_dpm_table->dpm_levels[i].value, - arcturus_freqs_in_same_level( + (clocks.num_levels == 1) ? 
"*" : + (arcturus_freqs_in_same_level( clocks.data[i].clocks_in_khz / 1000, - now / 100) ? "*" : ""); + now / 100) ? "*" : "")); break; default: @@ -756,8 +782,6 @@ static int arcturus_force_clk_levels(struct smu_context *smu, uint32_t soft_min_level, soft_max_level; int ret = 0; - mutex_lock(&(smu->mutex)); - soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_max_level = mask ? (fls(mask) - 1) : 0; @@ -792,91 +816,19 @@ static int arcturus_force_clk_levels(struct smu_context *smu, break; case SMU_MCLK: - single_dpm_table = &(dpm_table->mem_table); - - if (soft_max_level >= single_dpm_table->count) { - pr_err("Clock level specified %d is over max allowed %d\n", - soft_max_level, single_dpm_table->count - 1); - ret = -EINVAL; - break; - } - - single_dpm_table->dpm_state.soft_min_level = - single_dpm_table->dpm_levels[soft_min_level].value; - single_dpm_table->dpm_state.soft_max_level = - single_dpm_table->dpm_levels[soft_max_level].value; - - ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_UCLK_MASK); - if (ret) { - pr_err("Failed to upload boot level to lowest!\n"); - break; - } - - ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_UCLK_MASK); - if (ret) - pr_err("Failed to upload dpm max level to highest!\n"); - - break; - case SMU_SOCCLK: - single_dpm_table = &(dpm_table->soc_table); - - if (soft_max_level >= single_dpm_table->count) { - pr_err("Clock level specified %d is over max allowed %d\n", - soft_max_level, single_dpm_table->count - 1); - ret = -EINVAL; - break; - } - - single_dpm_table->dpm_state.soft_min_level = - single_dpm_table->dpm_levels[soft_min_level].value; - single_dpm_table->dpm_state.soft_max_level = - single_dpm_table->dpm_levels[soft_max_level].value; - - ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_SOCCLK_MASK); - if (ret) { - pr_err("Failed to upload boot level to lowest!\n"); - break; - } - - ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_SOCCLK_MASK); - if (ret) - pr_err("Failed to upload dpm max level to highest!\n"); - - break; - case SMU_FCLK: - single_dpm_table = &(dpm_table->fclk_table); - - if (soft_max_level >= single_dpm_table->count) { - pr_err("Clock level specified %d is over max allowed %d\n", - soft_max_level, single_dpm_table->count - 1); - ret = -EINVAL; - break; - } - - single_dpm_table->dpm_state.soft_min_level = - single_dpm_table->dpm_levels[soft_min_level].value; - single_dpm_table->dpm_state.soft_max_level = - single_dpm_table->dpm_levels[soft_max_level].value; - - ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_FCLK_MASK); - if (ret) { - pr_err("Failed to upload boot level to lowest!\n"); - break; - } - - ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_FCLK_MASK); - if (ret) - pr_err("Failed to upload dpm max level to highest!\n"); - + /* + * Should not arrive here since Arcturus does not + * support mclk/socclk/fclk softmin/softmax settings + */ + ret = -EINVAL; break; default: break; } - mutex_unlock(&(smu->mutex)); return ret; } @@ -1043,7 +995,7 @@ static int arcturus_read_sensor(struct smu_context *smu, *size = 4; break; default: - ret = smu_smc_read_sensor(smu, sensor, data, size); + ret = smu_v11_0_read_sensor(smu, sensor, data, size); } mutex_unlock(&smu->sensor_lock); @@ -1186,6 +1138,7 @@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest) { struct arcturus_dpm_table *dpm_table = (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context; + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0); uint32_t soft_level; int ret = 0; @@ -1199,40 +1152,27 
@@ static int arcturus_force_dpm_limit_value(struct smu_context *smu, bool highest) dpm_table->gfx_table.dpm_state.soft_max_level = dpm_table->gfx_table.dpm_levels[soft_level].value; - /* uclk */ - if (highest) - soft_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table)); - else - soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table)); - - dpm_table->mem_table.dpm_state.soft_min_level = - dpm_table->mem_table.dpm_state.soft_max_level = - dpm_table->mem_table.dpm_levels[soft_level].value; - - /* socclk */ - if (highest) - soft_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table)); - else - soft_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table)); - - dpm_table->soc_table.dpm_state.soft_min_level = - dpm_table->soc_table.dpm_state.soft_max_level = - dpm_table->soc_table.dpm_levels[soft_level].value; - - ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF); + ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK); if (ret) { pr_err("Failed to upload boot level to %s!\n", highest ? "highest" : "lowest"); return ret; } - ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF); + ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK); if (ret) { pr_err("Failed to upload dpm max level to %s!\n!", highest ? "highest" : "lowest"); return ret; } + if (hive) + /* + * Force XGMI Pstate to highest or lowest + * TODO: revise this when xgmi dpm is functional + */ + ret = smu_v11_0_set_xgmi_pstate(smu, highest ? 1 : 0); + return ret; } @@ -1240,6 +1180,7 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu) { struct arcturus_dpm_table *dpm_table = (struct arcturus_dpm_table *)smu->smu_dpm.dpm_context; + struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(smu->adev, 0); uint32_t soft_min_level, soft_max_level; int ret = 0; @@ -1251,34 +1192,25 @@ static int arcturus_unforce_dpm_levels(struct smu_context *smu) dpm_table->gfx_table.dpm_state.soft_max_level = dpm_table->gfx_table.dpm_levels[soft_max_level].value; - /* uclk */ - soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->mem_table)); - soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->mem_table)); - dpm_table->mem_table.dpm_state.soft_min_level = - dpm_table->gfx_table.dpm_levels[soft_min_level].value; - dpm_table->mem_table.dpm_state.soft_max_level = - dpm_table->gfx_table.dpm_levels[soft_max_level].value; - - /* socclk */ - soft_min_level = arcturus_find_lowest_dpm_level(&(dpm_table->soc_table)); - soft_max_level = arcturus_find_highest_dpm_level(&(dpm_table->soc_table)); - dpm_table->soc_table.dpm_state.soft_min_level = - dpm_table->soc_table.dpm_levels[soft_min_level].value; - dpm_table->soc_table.dpm_state.soft_max_level = - dpm_table->soc_table.dpm_levels[soft_max_level].value; - - ret = arcturus_upload_dpm_level(smu, false, 0xFFFFFFFF); + ret = arcturus_upload_dpm_level(smu, false, FEATURE_DPM_GFXCLK_MASK); if (ret) { pr_err("Failed to upload DPM Bootup Levels!"); return ret; } - ret = arcturus_upload_dpm_level(smu, true, 0xFFFFFFFF); + ret = arcturus_upload_dpm_level(smu, true, FEATURE_DPM_GFXCLK_MASK); if (ret) { pr_err("Failed to upload DPM Max Levels!"); return ret; } + if (hive) + /* + * Reset XGMI Pstate back to default + * TODO: revise this when xgmi dpm is functional + */ + ret = smu_v11_0_set_xgmi_pstate(smu, 0); + return ret; } @@ -1329,15 +1261,14 @@ arcturus_get_profiling_clk_mask(struct smu_context *smu, static int arcturus_get_power_limit(struct smu_context *smu, uint32_t *limit, - bool asic_default) + bool cap) { 
PPTable_t *pptable = smu->smu_table.driver_pptable; uint32_t asic_default_power_limit = 0; int ret = 0; int power_src; - if (!smu->default_power_limit || - !smu->power_limit) { + if (!smu->power_limit) { if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC); if (power_src < 0) @@ -1360,17 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu, pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; } - if (smu->od_enabled) { - asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit); - asic_default_power_limit /= 100; - } - - smu->default_power_limit = asic_default_power_limit; smu->power_limit = asic_default_power_limit; } - if (asic_default) - *limit = smu->default_power_limit; + if (cap) + *limit = smu_v11_0_get_max_power_limit(smu); else *limit = smu->power_limit; @@ -1891,6 +1816,260 @@ static bool arcturus_is_dpm_running(struct smu_context *smu) return !!(feature_enabled & SMC_DPM_FEATURE); } +static int arcturus_dpm_set_uvd_enable(struct smu_context *smu, bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret = 0; + + if (enable) { + if (!smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 1); + if (ret) { + pr_err("[EnableVCNDPM] failed!\n"); + return ret; + } + } + power_gate->vcn_gated = false; + } else { + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_feature_set_enabled(smu, SMU_FEATURE_VCN_PG_BIT, 0); + if (ret) { + pr_err("[DisableVCNDPM] failed!\n"); + return ret; + } + } + power_gate->vcn_gated = true; + } + + return ret; +} + + +static void arcturus_fill_eeprom_i2c_req(SwI2cRequest_t *req, bool write, + uint8_t address, uint32_t numbytes, + uint8_t *data) +{ + int i; + + BUG_ON(numbytes > MAX_SW_I2C_COMMANDS); + + req->I2CcontrollerPort = 0; + req->I2CSpeed = 2; + req->SlaveAddress = address; + req->NumCmds = numbytes; + + for (i = 0; i < numbytes; i++) { + SwI2cCmd_t *cmd = &req->SwI2cCmds[i]; + + /* First 2 bytes are always write for lower 2b EEPROM address */ + if (i < 2) + cmd->Cmd = 1; + else + cmd->Cmd = write; + + + /* Add RESTART for read after address filled */ + cmd->CmdConfig |= (i == 2 && !write) ? CMDCONFIG_RESTART_MASK : 0; + + /* Add STOP in the end */ + cmd->CmdConfig |= (i == (numbytes - 1)) ? 
CMDCONFIG_STOP_MASK : 0; + + /* Fill with data regardless if read or write to simplify code */ + cmd->RegisterAddr = data[i]; + } +} + +static int arcturus_i2c_eeprom_read_data(struct i2c_adapter *control, + uint8_t address, + uint8_t *data, + uint32_t numbytes) +{ + uint32_t i, ret = 0; + SwI2cRequest_t req; + struct amdgpu_device *adev = to_amdgpu_device(control); + struct smu_table_context *smu_table = &adev->smu.smu_table; + struct smu_table *table = &smu_table->tables[SMU_TABLE_I2C_COMMANDS]; + + memset(&req, 0, sizeof(req)); + arcturus_fill_eeprom_i2c_req(&req, false, address, numbytes, data); + + mutex_lock(&adev->smu.mutex); + /* Now read data starting with that address */ + ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, + true); + mutex_unlock(&adev->smu.mutex); + + if (!ret) { + SwI2cRequest_t *res = (SwI2cRequest_t *)table->cpu_addr; + + /* Assume SMU fills res.SwI2cCmds[i].Data with read bytes */ + for (i = 0; i < numbytes; i++) + data[i] = res->SwI2cCmds[i].Data; + + pr_debug("arcturus_i2c_eeprom_read_data, address = %x, bytes = %d, data :", + (uint16_t)address, numbytes); + + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, + 8, 1, data, numbytes, false); + } else + pr_err("arcturus_i2c_eeprom_read_data - error occurred :%x", ret); + + return ret; +} + +static int arcturus_i2c_eeprom_write_data(struct i2c_adapter *control, + uint8_t address, + uint8_t *data, + uint32_t numbytes) +{ + uint32_t ret; + SwI2cRequest_t req; + struct amdgpu_device *adev = to_amdgpu_device(control); + + memset(&req, 0, sizeof(req)); + arcturus_fill_eeprom_i2c_req(&req, true, address, numbytes, data); + + mutex_lock(&adev->smu.mutex); + ret = smu_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, &req, true); + mutex_unlock(&adev->smu.mutex); + + if (!ret) { + pr_debug("arcturus_i2c_write(), address = %x, bytes = %d , data: ", + (uint16_t)address, numbytes); + + print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_NONE, + 8, 1, data, numbytes, false); + /* + * According to EEPROM spec there is a MAX of 10 ms required for + * EEPROM to flush internal RX buffer after STOP was issued at the + * end of write transaction. During this time the EEPROM will not be + * responsive to any more commands - so wait a bit more. 
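For illustration, the chunk arithmetic used by the transfer routine just below, assuming (not verified here) that MAX_SW_I2C_COMMANDS is 24: each SMU request carries 2 EEPROM offset bytes plus the remaining data bytes, so a 50-byte write starting at offset 0x0000 goes out as three requests.

/* hypothetical numbers for the sketch; only MAX_SW_I2C_COMMANDS - 2 data
 * bytes fit per request because 2 bytes carry the EEPROM offset */
uint32_t data_size = 50;				/* payload bytes to write            */
uint32_t data_chunk_size = 24 - 2;			/* assumes MAX_SW_I2C_COMMANDS == 24 */
uint32_t full_chunks = data_size / data_chunk_size;	/* 2 requests of 22 data bytes       */
uint32_t tail = data_size % data_chunk_size;		/* 1 request of 6 data bytes         */
/* requests land at EEPROM offsets 0x0000, 0x0016 and 0x002C */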
+ */ + msleep(10); + + } else + pr_err("arcturus_i2c_write- error occurred :%x", ret); + + return ret; +} + +static int arcturus_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, int num) +{ + uint32_t i, j, ret, data_size, data_chunk_size, next_eeprom_addr = 0; + uint8_t *data_ptr, data_chunk[MAX_SW_I2C_COMMANDS] = { 0 }; + + for (i = 0; i < num; i++) { + /* + * SMU interface allows at most MAX_SW_I2C_COMMANDS bytes of data at + * once and hence the data needs to be spliced into chunks and sent each + * chunk separately + */ + data_size = msgs[i].len - 2; + data_chunk_size = MAX_SW_I2C_COMMANDS - 2; + next_eeprom_addr = (msgs[i].buf[0] << 8 & 0xff00) | (msgs[i].buf[1] & 0xff); + data_ptr = msgs[i].buf + 2; + + for (j = 0; j < data_size / data_chunk_size; j++) { + /* Insert the EEPROM dest addess, bits 0-15 */ + data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff); + data_chunk[1] = (next_eeprom_addr & 0xff); + + if (msgs[i].flags & I2C_M_RD) { + ret = arcturus_i2c_eeprom_read_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, MAX_SW_I2C_COMMANDS); + + memcpy(data_ptr, data_chunk + 2, data_chunk_size); + } else { + + memcpy(data_chunk + 2, data_ptr, data_chunk_size); + + ret = arcturus_i2c_eeprom_write_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, MAX_SW_I2C_COMMANDS); + } + + if (ret) { + num = -EIO; + goto fail; + } + + next_eeprom_addr += data_chunk_size; + data_ptr += data_chunk_size; + } + + if (data_size % data_chunk_size) { + data_chunk[0] = ((next_eeprom_addr >> 8) & 0xff); + data_chunk[1] = (next_eeprom_addr & 0xff); + + if (msgs[i].flags & I2C_M_RD) { + ret = arcturus_i2c_eeprom_read_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, (data_size % data_chunk_size) + 2); + + memcpy(data_ptr, data_chunk + 2, data_size % data_chunk_size); + } else { + memcpy(data_chunk + 2, data_ptr, data_size % data_chunk_size); + + ret = arcturus_i2c_eeprom_write_data(i2c_adap, + (uint8_t)msgs[i].addr, + data_chunk, (data_size % data_chunk_size) + 2); + } + + if (ret) { + num = -EIO; + goto fail; + } + } + } + +fail: + return num; +} + +static u32 arcturus_i2c_eeprom_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + + +static const struct i2c_algorithm arcturus_i2c_eeprom_i2c_algo = { + .master_xfer = arcturus_i2c_eeprom_i2c_xfer, + .functionality = arcturus_i2c_eeprom_i2c_func, +}; + +static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control) +{ + struct amdgpu_device *adev = to_amdgpu_device(control); + int res; + + control->owner = THIS_MODULE; + control->class = I2C_CLASS_SPD; + control->dev.parent = &adev->pdev->dev; + control->algo = &arcturus_i2c_eeprom_i2c_algo; + snprintf(control->name, sizeof(control->name), "RAS EEPROM"); + + res = i2c_add_adapter(control); + if (res) + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + + return res; +} + +static void arcturus_i2c_eeprom_control_fini(struct i2c_adapter *control) +{ + i2c_del_adapter(control); +} + +static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + + return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; +} + static const struct pptable_funcs arcturus_ppt_funcs = { /* translate smu index into arcturus specific index */ .get_smu_msg_index = arcturus_get_smu_msg_index, @@ -1909,7 +2088,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { /* init dpm */ .get_allowed_feature_mask = arcturus_get_allowed_feature_mask, /* btc */ - .run_afll_btc = 
arcturus_run_btc_afll, + .run_btc = arcturus_run_btc, /* dpm/clk tables */ .set_default_dpm_table = arcturus_set_default_dpm_table, .populate_umd_state_clk = arcturus_populate_umd_state_clk, @@ -1929,12 +2108,62 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .dump_pptable = arcturus_dump_pptable, .get_power_limit = arcturus_get_power_limit, .is_dpm_running = arcturus_is_dpm_running, + .dpm_set_uvd_enable = arcturus_dpm_set_uvd_enable, + .i2c_eeprom_init = arcturus_i2c_eeprom_control_init, + .i2c_eeprom_fini = arcturus_i2c_eeprom_control_fini, + .init_microcode = smu_v11_0_init_microcode, + .load_microcode = smu_v11_0_load_microcode, + .init_smc_tables = smu_v11_0_init_smc_tables, + .fini_smc_tables = smu_v11_0_fini_smc_tables, + .init_power = smu_v11_0_init_power, + .fini_power = smu_v11_0_fini_power, + .check_fw_status = smu_v11_0_check_fw_status, + .setup_pptable = smu_v11_0_setup_pptable, + .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, + .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios, + .check_pptable = smu_v11_0_check_pptable, + .parse_pptable = smu_v11_0_parse_pptable, + .populate_smc_tables = smu_v11_0_populate_smc_pptable, + .check_fw_version = smu_v11_0_check_fw_version, + .write_pptable = smu_v11_0_write_pptable, + .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, + .set_tool_table_location = smu_v11_0_set_tool_table_location, + .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, + .system_features_control = smu_v11_0_system_features_control, + .send_smc_msg = smu_v11_0_send_msg, + .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, + .read_smc_arg = smu_v11_0_read_arg, + .init_display_count = smu_v11_0_init_display_count, + .set_allowed_mask = smu_v11_0_set_allowed_mask, + .get_enabled_mask = smu_v11_0_get_enabled_mask, + .notify_display_change = smu_v11_0_notify_display_change, + .set_power_limit = smu_v11_0_set_power_limit, + .get_current_clk_freq = smu_v11_0_get_current_clk_freq, + .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks, + .start_thermal_control = smu_v11_0_start_thermal_control, + .stop_thermal_control = smu_v11_0_stop_thermal_control, + .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, + .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, + .get_fan_control_mode = smu_v11_0_get_fan_control_mode, + .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, + .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, + .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, + .gfx_off_control = smu_v11_0_gfx_off_control, + .register_irq_handler = smu_v11_0_register_irq_handler, + .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, + .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, + .baco_is_support= smu_v11_0_baco_is_support, + .baco_get_state = smu_v11_0_baco_get_state, + .baco_set_state = smu_v11_0_baco_set_state, + .baco_reset = smu_v11_0_baco_reset, + .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, + .override_pcie_parameters = smu_v11_0_override_pcie_parameters, + .get_pptable_power_limit = arcturus_get_pptable_power_limit, }; void arcturus_set_ppt_funcs(struct smu_context *smu) { - struct smu_table_context *smu_table = &smu->smu_table; - smu->ppt_funcs = &arcturus_ppt_funcs; - smu_table->table_count = TABLE_COUNT; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile 
b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile index cc63705920dc..2773966ae434 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/Makefile @@ -36,7 +36,8 @@ HARDWARE_MGR = hwmgr.o processpptables.o \ pp_overdriver.o smu_helper.o \ vega20_processpptables.o vega20_hwmgr.o vega20_powertune.o \ vega20_thermal.o common_baco.o vega10_baco.o vega20_baco.o \ - vega12_baco.o smu9_baco.o + vega12_baco.o smu9_baco.o tonga_baco.o polaris_baco.o fiji_baco.o \ + ci_baco.o smu7_baco.o AMD_PP_HWMGR = $(addprefix $(AMD_PP_PATH)/hwmgr/,$(HARDWARE_MGR)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c new file mode 100644 index 000000000000..3be40114e63d --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.c @@ -0,0 +1,195 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "ci_baco.h" + +#include "gmc/gmc_7_1_d.h" +#include "gmc/gmc_7_1_sh_mask.h" + +#include "bif/bif_4_1_d.h" +#include "bif/bif_4_1_sh_mask.h" + +#include "dce/dce_8_0_d.h" +#include "dce/dce_8_0_sh_mask.h" + +#include "smu/smu_7_0_1_d.h" +#include "smu/smu_7_0_1_sh_mask.h" + +#include "gca/gfx_7_2_d.h" +#include "gca/gfx_7_2_sh_mask.h" + +static const struct baco_cmd_entry gpio_tbl[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }, + { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff }, + { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 }, + { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } +}; + +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl[] = +{ + { CMD_READMODIFYWRITE, mmDISPPLL_BG_CNTL, DISPPLL_BG_CNTL__DISPPLL_BG_PDN_MASK, DISPPLL_BG_CNTL__DISPPLL_BG_PDN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_DC }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__OSC_EN_MASK, 
CG_CLKPIN_CNTL_DC__OSC_EN__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_DC__XTALIN_SEL_MASK, CG_CLKPIN_CNTL_DC__XTALIN_SEL__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_RESET_MASK, PLL_CNTL__PLL_RESET__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_POWER_DOWN_MASK, PLL_CNTL__PLL_POWER_DOWN__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmPLL_CNTL, PLL_CNTL__PLL_BYPASS_CAL_MASK, PLL_CNTL__PLL_BYPASS_CAL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 }, + { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL_MASK, MISC_CLK_CTRL__DFT_SMS_PG_CLK_SEL__SHIFT, 0, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x2 } +}; + +static const struct baco_cmd_entry enter_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 
BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 } +}; + +#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK + +static const struct baco_cmd_entry exit_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_DELAY_MS, 0, 0, 0, 20, 0 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x20 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x10 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, + { CMD_WRITE, mmCP_PFP_UCODE_ADDR, 0, 0, 0, 0 } +}; + +int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + smu7_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* aisc already in the target state */ + return 0; + + if (state == BACO_STATE_IN) { + baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl)); + baco_program_registers(hwmgr, enable_fb_req_rej_tbl, + ARRAY_SIZE(enable_fb_req_rej_tbl)); + baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl)); + baco_program_registers(hwmgr, turn_off_plls_tbl, + ARRAY_SIZE(turn_off_plls_tbl)); + if (baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if (baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h new file mode 100644 index 000000000000..17041f187020 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ci_baco.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __CI_BACO_H__ +#define __CI_BACO_H__ +#include "smu7_baco.h" + +extern int ci_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c index 9c57c1f67749..1c73776bd606 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.c @@ -79,6 +79,25 @@ static bool baco_cmd_handler(struct pp_hwmgr *hwmgr, u32 command, u32 reg, u32 m return ret; } +bool baco_program_registers(struct pp_hwmgr *hwmgr, + const struct baco_cmd_entry *entry, + const u32 array_size) +{ + u32 i, reg = 0; + + for (i = 0; i < array_size; i++) { + if ((entry[i].cmd == CMD_WRITE) || + (entry[i].cmd == CMD_READMODIFYWRITE) || + (entry[i].cmd == CMD_WAITFOR)) + reg = entry[i].reg_offset; + if (!baco_cmd_handler(hwmgr, entry[i].cmd, reg, entry[i].mask, + entry[i].shift, entry[i].val, entry[i].timeout)) + return false; + } + + return true; +} + bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr, const struct soc15_baco_cmd_entry *entry, const u32 array_size) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h index 95296c916f4e..8393eb62706d 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/common_baco.h @@ -33,6 +33,15 @@ enum baco_cmd_type { CMD_DELAY_US, }; +struct baco_cmd_entry { + enum baco_cmd_type cmd; + uint32_t reg_offset; + uint32_t mask; + uint32_t shift; + uint32_t timeout; + uint32_t val; +}; + struct soc15_baco_cmd_entry { enum baco_cmd_type cmd; uint32_t hwip; @@ -44,6 +53,10 @@ struct soc15_baco_cmd_entry { uint32_t timeout; uint32_t val; }; + +extern bool baco_program_registers(struct pp_hwmgr *hwmgr, + const struct baco_cmd_entry *entry, + const u32 array_size); extern bool soc15_baco_program_registers(struct pp_hwmgr *hwmgr, const struct soc15_baco_cmd_entry *entry, const u32 array_size); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c new file mode 100644 index 000000000000..c0368f2dfb21 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.c @@ -0,0 +1,196 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "fiji_baco.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" + + +static const struct baco_cmd_entry gpio_tbl[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }, + { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff }, + { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 }, + { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } +}; + +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 
CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 } +}; + +static const struct baco_cmd_entry clk_req_b_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 } +}; + +static const struct baco_cmd_entry enter_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 } 
+}; + +#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK + +static const struct baco_cmd_entry exit_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_0, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_1, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_2, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_3, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_4, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_5, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_8, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_9, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_10, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_11, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_12, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_13, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_14, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_15, 0, 0, 0, 0 } +}; + +int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + smu7_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* aisc already in the target state */ + return 0; + + if (state == BACO_STATE_IN) { + baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl)); + baco_program_registers(hwmgr, enable_fb_req_rej_tbl, + ARRAY_SIZE(enable_fb_req_rej_tbl)); + baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl)); + baco_program_registers(hwmgr, turn_off_plls_tbl, + ARRAY_SIZE(turn_off_plls_tbl)); + baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl)); + if (baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if (baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h new file mode 100644 index 000000000000..47f402900bdb --- /dev/null +++ 
b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_baco.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __FIJI_BACO_H__ +#define __FIJI_BACO_H__ +#include "smu7_baco.h" + +extern int fiji_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index a24beaa4fb01..d2909c91d65b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -81,6 +81,8 @@ static void hwmgr_init_workload_prority(struct pp_hwmgr *hwmgr) int hwmgr_early_init(struct pp_hwmgr *hwmgr) { + struct amdgpu_device *adev; + if (!hwmgr) return -EINVAL; @@ -94,8 +96,11 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) hwmgr_init_workload_prority(hwmgr); hwmgr->gfxoff_state_changed_by_workload = false; + adev = hwmgr->adev; + switch (hwmgr->chip_family) { case AMDGPU_FAMILY_CI: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->smumgr_funcs = &ci_smu_funcs; ci_set_asic_special_caps(hwmgr); hwmgr->feature_mask &= ~(PP_VBI_TIME_SUPPORT_MASK | @@ -106,12 +111,14 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) smu7_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_CZ: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->od_enabled = false; hwmgr->smumgr_funcs = &smu8_smu_funcs; hwmgr->feature_mask &= ~PP_GFXOFF_MASK; smu8_init_function_pointers(hwmgr); break; case AMDGPU_FAMILY_VI: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->feature_mask &= ~PP_GFXOFF_MASK; switch (hwmgr->chip_id) { case CHIP_TOPAZ: @@ -153,6 +160,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) case AMDGPU_FAMILY_AI: switch (hwmgr->chip_id) { case CHIP_VEGA10: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->feature_mask &= ~PP_GFXOFF_MASK; hwmgr->smumgr_funcs = &vega10_smu_funcs; vega10_hwmgr_init(hwmgr); @@ -162,6 +170,7 @@ int hwmgr_early_init(struct pp_hwmgr *hwmgr) vega12_hwmgr_init(hwmgr); break; case CHIP_VEGA20: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; hwmgr->feature_mask &= ~PP_GFXOFF_MASK; hwmgr->smumgr_funcs = &vega20_smu_funcs; vega20_hwmgr_init(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c new file mode 100644 index 000000000000..8f8e296f2fe9 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.c @@ -0,0 +1,222 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "polaris_baco.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_11_0_d.h" +#include "dce/dce_11_0_sh_mask.h" + +#include "smu/smu_7_1_3_d.h" +#include "smu/smu_7_1_3_sh_mask.h" + +static const struct baco_cmd_entry gpio_tbl[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }, + { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff }, + { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 }, + { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } +}; + +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl[] = +{ + { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 
0, 0x1 }, + { CMD_DELAY_US, 0, 0, 0, 1, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_READMODIFYWRITE, 0xda2, 0x40, 0x6, 0, 0x0 }, + { CMD_DELAY_US, 0, 0, 0, 3, 0x0 }, + { CMD_READMODIFYWRITE, 0xda2, 0x8, 0x3, 0, 0x0 }, + { CMD_READMODIFYWRITE, 0xda2, 0x3fff00, 0x8, 0, 0x32 }, + { CMD_DELAY_US, 0, 0, 0, 3, 0x0 }, + { CMD_READMODIFYWRITE, mmMPLL_FUNC_CNTL_2, MPLL_FUNC_CNTL_2__ISO_DIS_P_MASK, MPLL_FUNC_CNTL_2__ISO_DIS_P__SHIFT, 0, 0x0 }, + { CMD_DELAY_US, 0, 0, 0, 5, 0x0 } +}; + +static const struct baco_cmd_entry clk_req_b_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 } +}; + +static const struct baco_cmd_entry enter_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x40 } 
+}; + +#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK + +static const struct baco_cmd_entry exit_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl_vg[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixGCK_DFS_BYPASS_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, GCK_DFS_BYPASS_CNTL__BYPASSACLK_MASK, GCK_DFS_BYPASS_CNTL__BYPASSACLK__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl_vg[] = +{ + { CMD_READMODIFYWRITE, mmDC_GPIO_PAD_STRENGTH_1, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP_MASK, DC_GPIO_PAD_STRENGTH_1__GENLK_STRENGTH_SP__SHIFT, 0, 0x1 }, + { CMD_DELAY_US, 0, 0, 0, 1, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_DRAM, MC_SEQ_DRAM__RST_CTL_MASK, MC_SEQ_DRAM__RST_CTL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC05002B0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC050032C }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x10, 0x4, 0, 0x1 }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, 0x10, 0, 1, 0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500080 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_DELAY_US, 0, 0, 0, 3, 0x0 }, + { CMD_DELAY_US, 0, 0, 0, 3, 0x0 }, + { CMD_DELAY_US, 0, 0, 0, 5, 0x0 } +}; + +int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + smu7_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* aisc already in the 
target state */ + return 0; + + if (state == BACO_STATE_IN) { + baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl)); + baco_program_registers(hwmgr, enable_fb_req_rej_tbl, + ARRAY_SIZE(enable_fb_req_rej_tbl)); + if (hwmgr->chip_id == CHIP_VEGAM) { + baco_program_registers(hwmgr, use_bclk_tbl_vg, ARRAY_SIZE(use_bclk_tbl_vg)); + baco_program_registers(hwmgr, turn_off_plls_tbl_vg, + ARRAY_SIZE(turn_off_plls_tbl_vg)); + } else { + baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl)); + baco_program_registers(hwmgr, turn_off_plls_tbl, + ARRAY_SIZE(turn_off_plls_tbl)); + } + baco_program_registers(hwmgr, clk_req_b_tbl, ARRAY_SIZE(clk_req_b_tbl)); + if (baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if (baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h new file mode 100644 index 000000000000..87a5fa0a157a --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris_baco.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __POLARIS_BACO_H__ +#define __POLARIS_BACO_H__ +#include "smu7_baco.h" + +extern int polaris_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c new file mode 100644 index 000000000000..044cda005aed --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.c @@ -0,0 +1,91 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#include "amdgpu.h" +#include "smu7_baco.h" +#include "tonga_baco.h" +#include "fiji_baco.h" +#include "polaris_baco.h" +#include "ci_baco.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" + +int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint32_t reg; + + *cap = false; + if (!phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_BACO)) + return 0; + + reg = RREG32(mmCC_BIF_BX_FUSESTRAP0); + + if (reg & CC_BIF_BX_FUSESTRAP0__STRAP_BIF_PX_CAPABLE_MASK) + *cap = true; + + return 0; +} + +int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + uint32_t reg; + + reg = RREG32(mmBACO_CNTL); + + if (reg & BACO_CNTL__BACO_MODE_MASK) + /* gfx has already entered BACO state */ + *state = BACO_STATE_IN; + else + *state = BACO_STATE_OUT; + return 0; +} + +int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + + switch (adev->asic_type) { + case CHIP_TOPAZ: + case CHIP_TONGA: + return tonga_baco_set_state(hwmgr, state); + case CHIP_FIJI: + return fiji_baco_set_state(hwmgr, state); + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_VEGAM: + return polaris_baco_set_state(hwmgr, state); +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: + case CHIP_HAWAII: + return ci_baco_set_state(hwmgr, state); +#endif + default: + return -EINVAL; + } +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h new file mode 100644 index 000000000000..be0d98abb536 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_baco.h @@ -0,0 +1,32 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMU7_BACO_H__ +#define __SMU7_BACO_H__ +#include "hwmgr.h" +#include "common_baco.h" + +extern int smu7_baco_get_capability(struct pp_hwmgr *hwmgr, bool *cap); +extern int smu7_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); +extern int smu7_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index 203ce4b1028f..f73dff68e799 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -48,6 +48,7 @@ #include "smu7_clockpowergating.h" #include "processpptables.h" #include "pp_thermal.h" +#include "smu7_baco.h" #include "ivsrcid/ivsrcid_vislands30.h" @@ -1994,7 +1995,6 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_voltage_lookup_table *lookup_table) { uint32_t table_size, i, j; - struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; table_size = lookup_table->count; PP_ASSERT_WITH_CODE(0 != lookup_table->count, @@ -2005,9 +2005,8 @@ static int smu7_sort_lookup_table(struct pp_hwmgr *hwmgr, for (j = i + 1; j > 0; j--) { if (lookup_table->entries[j].us_vdd < lookup_table->entries[j - 1].us_vdd) { - tmp_voltage_lookup_record = lookup_table->entries[j - 1]; - lookup_table->entries[j - 1] = lookup_table->entries[j]; - lookup_table->entries[j] = tmp_voltage_lookup_record; + swap(lookup_table->entries[j - 1], + lookup_table->entries[j]); } } } @@ -3983,6 +3982,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input) "Failed to populate and upload SCLK MCLK DPM levels!", result = tmp_result); + /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. 
+ */ + if (hwmgr->hardcode_pp_table != NULL) + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + tmp_result = smu7_update_avfs(hwmgr); PP_ASSERT_WITH_CODE((0 == tmp_result), "Failed to update avfs voltages!", @@ -5158,6 +5164,9 @@ static const struct pp_hwmgr_func smu7_hwmgr_funcs = { .get_power_profile_mode = smu7_get_power_profile_mode, .set_power_profile_mode = smu7_set_power_profile_mode, .get_performance_level = smu7_get_performance_level, + .get_asic_baco_capability = smu7_baco_get_capability, + .get_asic_baco_state = smu7_baco_get_state, + .set_asic_baco_state = smu7_baco_set_state, .power_off_asic = smu7_power_off_asic, }; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c new file mode 100644 index 000000000000..ea743bea8e29 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.c @@ -0,0 +1,231 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#include "amdgpu.h" +#include "tonga_baco.h" + +#include "gmc/gmc_8_1_d.h" +#include "gmc/gmc_8_1_sh_mask.h" + +#include "bif/bif_5_0_d.h" +#include "bif/bif_5_0_sh_mask.h" + +#include "dce/dce_10_0_d.h" +#include "dce/dce_10_0_sh_mask.h" + +#include "smu/smu_7_1_2_d.h" +#include "smu/smu_7_1_2_sh_mask.h" + + +static const struct baco_cmd_entry gpio_tbl[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff }, + { CMD_WRITE, mmDC_GPIO_DVODATA_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmDC_GPIO_DVODATA_MASK, 0, 0, 0, 0xffffffff }, + { CMD_WRITE, mmDC_GPIO_GENERIC_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_GENERIC_MASK, 0, 0, 0, 0x03333333 }, + { CMD_WRITE, mmDC_GPIO_SYNCA_EN, 0, 0, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmDC_GPIO_SYNCA_MASK, 0, 0, 0, 0x00001111 } +}; + +static const struct baco_cmd_entry enable_fb_req_rej_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0300024 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x1, 0x0, 0, 0x1 }, + { CMD_WRITE, mmBIF_FB_EN, 0, 0, 0, 0x0 } +}; + +static const struct baco_cmd_entry use_bclk_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN_MASK, CG_SPLL_FUNC_CNTL__SPLL_BYPASS_EN__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_BYPASS_CHG__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_STATUS }, + { CMD_WAITFOR, mmGCK_SMC_IND_DATA, CG_SPLL_STATUS__SPLL_CHG_STATUS_MASK, 0, 0xffffffff, 0x2 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG_MASK, CG_SPLL_FUNC_CNTL_2__SPLL_CTLREQ_CHG__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x4000000, 0x1a, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x2 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL_MASK, MPLL_CNTL_MODE__MPLL_SW_DIR_CONTROL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__MPLL_MCLK_SEL_MASK, MPLL_CNTL_MODE__MPLL_MCLK_SEL__SHIFT, 0, 0x0 } +}; + +static const struct baco_cmd_entry turn_off_plls_tbl[] = +{ + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_SPLL_FUNC_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK, CG_SPLL_FUNC_CNTL__SPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK, CG_SPLL_FUNC_CNTL__SPLL_PWRON__SHIFT, 0, 0x0 }, + { CMD_WRITE, 
mmGCK_SMC_IND_INDEX, 0, 0, 0, 0xC0500170 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x2000000, 0x19, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, 0x8000000, 0x1b, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMPLL_CNTL_MODE, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET_MASK, MPLL_CNTL_MODE__GLOBAL_MPLL_RESET__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmMPLL_CONTROL, 0, 0, 0, 0x00000006 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D0, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY0_D1, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D0, 0, 0, 0, 0x00007740 }, + { CMD_WRITE, mmMC_IO_RXCNTL_DPHY1_D1, 0, 0, 0, 0x00007740 }, + { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK0_PDNB__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMCLK_PWRMGT_CNTL, MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK, MCLK_PWRMGT_CNTL__MRDCK1_PDNB__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PU_MASK, MC_SEQ_CNTL_2__DRST_PU__SHIFT, 0, 0x0 }, + { CMD_READMODIFYWRITE, mmMC_SEQ_CNTL_2, MC_SEQ_CNTL_2__DRST_PD_MASK, MC_SEQ_CNTL_2__DRST_PD__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL_2 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN_MASK, CG_CLKPIN_CNTL_2__FORCE_BIF_REFCLK_EN__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMPLL_BYPASSCLK_SEL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL_MASK, MPLL_BYPASSCLK_SEL__MPLL_CLKOUT_SEL__SHIFT, 0, 0x4 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixMISC_CLK_CTRL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL_MASK, MISC_CLK_CTRL__DEEP_SLEEP_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, MISC_CLK_CTRL__ZCLK_SEL_MASK, MISC_CLK_CTRL__ZCLK_SEL__SHIFT, 0, 0x1 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixCG_CLKPIN_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, CG_CLKPIN_CNTL__BCLK_AS_XCLK_MASK, CG_CLKPIN_CNTL__BCLK_AS_XCLK__SHIFT, 0, 0x0 }, + { CMD_WRITE, mmGCK_SMC_IND_INDEX, 0, 0, 0, ixTHM_CLK_CNTL }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__CMON_CLK_SEL_MASK, THM_CLK_CNTL__CMON_CLK_SEL__SHIFT, 0, 0x1 }, + { CMD_READMODIFYWRITE, mmGCK_SMC_IND_DATA, THM_CLK_CNTL__TMON_CLK_SEL_MASK, THM_CLK_CNTL__TMON_CLK_SEL__SHIFT, 0, 0x1 } +}; + +static const struct baco_cmd_entry enter_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, 0, 5, 0x40000 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, 0, 5, 0x02 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, 0, 5, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, 0, 5, 0x08 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 
0xffffffff, 0x40 } +}; + +#define BACO_CNTL__PWRGOOD_MASK BACO_CNTL__PWRGOOD_GPIO_MASK+BACO_CNTL__PWRGOOD_MEM_MASK+BACO_CNTL__PWRGOOD_DVO_MASK + +static const struct baco_cmd_entry exit_baco_tbl[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_6, 0, 0, 0, 0 }, + { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 } +}; + +static const struct baco_cmd_entry gpio_tbl_iceland[] = +{ + { CMD_WRITE, mmGPIOPAD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PD_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_PU_EN, 0, 0, 0, 0x0 }, + { CMD_WRITE, mmGPIOPAD_MASK, 0, 0, 0, 0xff77ffff } +}; + +static const struct baco_cmd_entry exit_baco_tbl_iceland[] = +{ + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BCLK_OFF_MASK, BACO_CNTL__BACO_BCLK_OFF__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_POWER_OFF_MASK, BACO_CNTL__BACO_POWER_OFF__SHIFT, 0, 0x00 }, + { CMD_DELAY_MS, 0, 0, 0, 20, 0 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_BF_MASK, 0, 0xffffffff, 0x200 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ISO_DIS_MASK, BACO_CNTL__BACO_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__PWRGOOD_MASK, 0, 5, 0x1c00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_ANA_ISO_DIS_MASK, BACO_CNTL__BACO_ANA_ISO_DIS__SHIFT, 0, 0x01 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_BIF_SCLK_SWITCH_MASK, BACO_CNTL__BACO_BIF_SCLK_SWITCH__SHIFT, 0, 0x00 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_RESET_EN_MASK, BACO_CNTL__BACO_RESET_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__RCU_BIF_CONFIG_DONE_MASK, 0, 5, 0x100 }, + { CMD_READMODIFYWRITE, mmBACO_CNTL, BACO_CNTL__BACO_EN_MASK, BACO_CNTL__BACO_EN__SHIFT, 0, 0x00 }, + { CMD_WAITFOR, mmBACO_CNTL, BACO_CNTL__BACO_MODE_MASK, 0, 0xffffffff, 0x00 } +}; + +static const struct baco_cmd_entry clean_baco_tbl_iceland[] = +{ + { CMD_WRITE, mmBIOS_SCRATCH_7, 0, 0, 0, 0 } +}; + +int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) +{ + enum BACO_STATE cur_state; + + smu7_baco_get_state(hwmgr, &cur_state); + + if (cur_state == state) + /* asic already in the target state */ + return 0; + + if (state ==
BACO_STATE_IN) { + if (hwmgr->chip_id == CHIP_TOPAZ) + baco_program_registers(hwmgr, gpio_tbl_iceland, ARRAY_SIZE(gpio_tbl_iceland)); + else + baco_program_registers(hwmgr, gpio_tbl, ARRAY_SIZE(gpio_tbl)); + baco_program_registers(hwmgr, enable_fb_req_rej_tbl, + ARRAY_SIZE(enable_fb_req_rej_tbl)); + baco_program_registers(hwmgr, use_bclk_tbl, ARRAY_SIZE(use_bclk_tbl)); + baco_program_registers(hwmgr, turn_off_plls_tbl, + ARRAY_SIZE(turn_off_plls_tbl)); + if (baco_program_registers(hwmgr, enter_baco_tbl, + ARRAY_SIZE(enter_baco_tbl))) + return 0; + + } else if (state == BACO_STATE_OUT) { + /* HW requires at least 20ms between regulator off and on */ + msleep(20); + /* Execute Hardware BACO exit sequence */ + if (hwmgr->chip_id == CHIP_TOPAZ) { + if (baco_program_registers(hwmgr, exit_baco_tbl_iceland, + ARRAY_SIZE(exit_baco_tbl_iceland))) { + if (baco_program_registers(hwmgr, clean_baco_tbl_iceland, + ARRAY_SIZE(clean_baco_tbl_iceland))) + return 0; + } + } else { + if (baco_program_registers(hwmgr, exit_baco_tbl, + ARRAY_SIZE(exit_baco_tbl))) { + if (baco_program_registers(hwmgr, clean_baco_tbl, + ARRAY_SIZE(clean_baco_tbl))) + return 0; + } + } + } + + return -EINVAL; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h new file mode 100644 index 000000000000..5dc16cc8a295 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_baco.h @@ -0,0 +1,29 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __TONGA_BACO_H__ +#define __TONGA_BACO_H__ +#include "smu7_baco.h" + +extern int tonga_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c index beacfffbdc3e..d71a492c87a3 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c @@ -712,7 +712,6 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr, struct phm_ppt_v1_voltage_lookup_table *lookup_table) { uint32_t table_size, i, j; - struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record; PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count, "Lookup table is empty", return -EINVAL); @@ -724,9 +723,8 @@ static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr, for (j = i + 1; j > 0; j--) { if (lookup_table->entries[j].us_vdd < lookup_table->entries[j - 1].us_vdd) { - tmp_voltage_lookup_record = lookup_table->entries[j - 1]; - lookup_table->entries[j - 1] = lookup_table->entries[j]; - lookup_table->entries[j] = tmp_voltage_lookup_record; + swap(lookup_table->entries[j - 1], + lookup_table->entries[j]); } } } @@ -3691,6 +3689,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr, PP_ASSERT_WITH_CODE(!result, "Failed to upload PPtable!", return result); + /* + * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag. + * That effectively disables AVFS feature. + */ + if(hwmgr->hardcode_pp_table != NULL) + data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; + vega10_update_avfs(hwmgr); /* @@ -5265,6 +5270,59 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_ return 0; } +static int vega10_disable_power_features_for_compute_performance(struct pp_hwmgr *hwmgr, bool disable) +{ + struct vega10_hwmgr *data = hwmgr->backend; + uint32_t feature_mask = 0; + + if (disable) { + feature_mask |= data->smu_features[GNLD_ULV].enabled ? + data->smu_features[GNLD_ULV].smu_feature_bitmap : 0; + feature_mask |= data->smu_features[GNLD_DS_GFXCLK].enabled ? + data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0; + feature_mask |= data->smu_features[GNLD_DS_SOCCLK].enabled ? + data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0; + feature_mask |= data->smu_features[GNLD_DS_LCLK].enabled ? + data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0; + feature_mask |= data->smu_features[GNLD_DS_DCEFCLK].enabled ? + data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0; + } else { + feature_mask |= (!data->smu_features[GNLD_ULV].enabled) ? + data->smu_features[GNLD_ULV].smu_feature_bitmap : 0; + feature_mask |= (!data->smu_features[GNLD_DS_GFXCLK].enabled) ? + data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap : 0; + feature_mask |= (!data->smu_features[GNLD_DS_SOCCLK].enabled) ? + data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap : 0; + feature_mask |= (!data->smu_features[GNLD_DS_LCLK].enabled) ? + data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap : 0; + feature_mask |= (!data->smu_features[GNLD_DS_DCEFCLK].enabled) ? 
+ data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap : 0; + } + + if (feature_mask) + PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr, + !disable, feature_mask), + "enable/disable power features for compute performance Failed!", + return -EINVAL); + + if (disable) { + data->smu_features[GNLD_ULV].enabled = false; + data->smu_features[GNLD_DS_GFXCLK].enabled = false; + data->smu_features[GNLD_DS_SOCCLK].enabled = false; + data->smu_features[GNLD_DS_LCLK].enabled = false; + data->smu_features[GNLD_DS_DCEFCLK].enabled = false; + } else { + data->smu_features[GNLD_ULV].enabled = true; + data->smu_features[GNLD_DS_GFXCLK].enabled = true; + data->smu_features[GNLD_DS_SOCCLK].enabled = true; + data->smu_features[GNLD_DS_LCLK].enabled = true; + data->smu_features[GNLD_DS_DCEFCLK].enabled = true; + } + + return 0; + +} + static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .backend_init = vega10_hwmgr_backend_init, .backend_fini = vega10_hwmgr_backend_fini, @@ -5332,6 +5390,8 @@ static const struct pp_hwmgr_func vega10_hwmgr_funcs = { .get_ppfeature_status = vega10_get_ppfeature_status, .set_ppfeature_status = vega10_set_ppfeature_status, .set_mp1_state = vega10_set_mp1_state, + .disable_power_features_for_compute_performance = + vega10_disable_power_features_for_compute_performance, }; int vega10_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c index df6ff9252401..9b5e72bdceca 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_baco.c @@ -29,7 +29,7 @@ #include "vega20_baco.h" #include "vega20_smumgr.h" - +#include "amdgpu_ras.h" static const struct soc15_baco_cmd_entry clean_baco_tbl[] = { @@ -74,6 +74,7 @@ int vega20_baco_get_state(struct pp_hwmgr *hwmgr, enum BACO_STATE *state) int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) { struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev); + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); enum BACO_STATE cur_state; uint32_t data; @@ -84,13 +85,19 @@ int vega20_baco_set_state(struct pp_hwmgr *hwmgr, enum BACO_STATE state) return 0; if (state == BACO_STATE_IN) { - data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); - data |= 0x80000000; - WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); - - - if(smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_EnterBaco, 0)) - return -EINVAL; + if (!ras || !ras->supported) { + data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL); + data |= 0x80000000; + WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data); + + if(smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnterBaco, 0)) + return -EINVAL; + } else { + if(smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_EnterBaco, 1)) + return -EINVAL; + } } else if (state == BACO_STATE_OUT) { if (smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ExitBaco)) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c index f5915308e643..5bcf0d684151 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c @@ -183,6 +183,9 @@ static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr) PHM_PlatformCaps_TablelessHardwareInterface); phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_BACO); + + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_EnableSMU7ThermalManagement); if (adev->pg_flags & AMD_PG_SUPPORT_UVD) @@ -490,8 +493,8 
@@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr) "Failed to init sclk threshold!", return ret); - if (adev->in_baco_reset) { - adev->in_baco_reset = 0; + if (adev->in_gpu_reset && + (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) { ret = vega20_baco_apply_vdci_flush_workaround(hwmgr); if (ret) @@ -4155,6 +4158,38 @@ static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire) return res; } +static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr, + enum pp_df_cstate state) +{ + int ret; + + /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */ + if (hwmgr->smu_version < 0x283200) { + pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n"); + return -EINVAL; + } + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state); + if (ret) + pr_err("SetDfCstate failed!\n"); + + return ret; +} + +static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr, + uint32_t pstate) +{ + int ret; + + ret = smum_send_msg_to_smc_with_parameter(hwmgr, + PPSMC_MSG_SetXgmiMode, + pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3); + if (ret) + pr_err("SetXgmiPstate failed!\n"); + + return ret; +} + static const struct pp_hwmgr_func vega20_hwmgr_funcs = { /* init/fini related */ .backend_init = vega20_hwmgr_backend_init, @@ -4223,6 +4258,8 @@ static const struct pp_hwmgr_func vega20_hwmgr_funcs = { .set_asic_baco_state = vega20_baco_set_state, .set_mp1_state = vega20_set_mp1_state, .smu_i2c_bus_access = vega20_smu_i2c_bus_access, + .set_df_cstate = vega20_set_df_cstate, + .set_xgmi_pstate = vega20_set_xgmi_pstate, }; int vega20_hwmgr_init(struct pp_hwmgr *hwmgr) diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h index 23171a4d9a31..031e0c22fcc7 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h @@ -259,10 +259,8 @@ struct smu_table_context struct smu_bios_boot_up_values boot_values; void *driver_pptable; struct smu_table *tables; - uint32_t table_count; struct smu_table memory_pool; uint8_t thermal_controller_type; - uint16_t TDPODLimit; void *overdrive_table; }; @@ -322,6 +320,13 @@ struct mclock_latency_table { struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM]; }; +enum smu_reset_mode +{ + SMU_RESET_MODE_0, + SMU_RESET_MODE_1, + SMU_RESET_MODE_2, +}; + enum smu_baco_state { SMU_BACO_STATE_ENTER = 0, @@ -341,7 +346,6 @@ struct smu_context struct amdgpu_device *adev; struct amdgpu_irq_src *irq_source; - const struct smu_funcs *funcs; const struct pptable_funcs *ppt_funcs; struct mutex mutex; struct mutex sensor_lock; @@ -382,11 +386,15 @@ struct smu_context uint32_t power_profile_mode; uint32_t default_power_profile_mode; bool pm_enabled; + bool is_apu; uint32_t smc_if_version; + bool uploading_custom_pp_table; }; +struct i2c_adapter; + struct pptable_funcs { int (*alloc_dpm_context)(struct smu_context *smu); int (*store_powerplay_table)(struct smu_context *smu); @@ -398,7 +406,7 @@ struct pptable_funcs { int (*get_smu_table_index)(struct smu_context *smu, uint32_t index); int (*get_smu_power_index)(struct smu_context *smu, uint32_t index); int (*get_workload_type)(struct smu_context *smu, enum PP_SMC_POWER_PROFILE profile); - int (*run_afll_btc)(struct smu_context *smu); + int (*run_btc)(struct smu_context *smu); int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu); int 
(*set_default_dpm_table)(struct smu_context *smu); @@ -459,17 +467,19 @@ struct pptable_funcs { int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); void (*dump_pptable)(struct smu_context *smu); int (*get_power_limit)(struct smu_context *smu, uint32_t *limit, bool asic_default); - int (*get_dpm_uclk_limited)(struct smu_context *smu, uint32_t *clock, bool max); -}; - -struct smu_funcs -{ + int (*get_dpm_clk_limited)(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t dpm_level, uint32_t *freq); + int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state); + int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap); + int (*i2c_eeprom_init)(struct i2c_adapter *control); + void (*i2c_eeprom_fini)(struct i2c_adapter *control); + int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table); int (*init_microcode)(struct smu_context *smu); + int (*load_microcode)(struct smu_context *smu); int (*init_smc_tables)(struct smu_context *smu); int (*fini_smc_tables)(struct smu_context *smu); int (*init_power)(struct smu_context *smu); int (*fini_power)(struct smu_context *smu); - int (*load_microcode)(struct smu_context *smu); int (*check_fw_status)(struct smu_context *smu); int (*setup_pptable)(struct smu_context *smu); int (*get_vbios_bootup_values)(struct smu_context *smu); @@ -485,7 +495,6 @@ struct smu_funcs int (*set_min_dcef_deep_sleep)(struct smu_context *smu); int (*set_tool_table_location)(struct smu_context *smu); int (*notify_memory_pool_location)(struct smu_context *smu); - int (*write_watermarks_table)(struct smu_context *smu); int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu); int (*system_features_control)(struct smu_context *smu, bool en); int (*send_smc_msg)(struct smu_context *smu, uint16_t msg); @@ -499,8 +508,7 @@ struct smu_funcs int (*get_current_clk_freq)(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value); int (*init_max_sustainable_clocks)(struct smu_context *smu); int (*start_thermal_control)(struct smu_context *smu); - int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor, - void *data, uint32_t *size); + int (*stop_thermal_control)(struct smu_context *smu); int (*set_deep_sleep_dcefclk)(struct smu_context *smu, uint32_t clk); int (*set_active_display_count)(struct smu_context *smu, uint32_t count); int (*store_cc6_data)(struct smu_context *smu, uint32_t separation_time, @@ -522,8 +530,6 @@ struct smu_funcs int (*get_current_shallow_sleep_clocks)(struct smu_context *smu, struct smu_clock_info *clocks); int (*notify_smu_enable_pwe)(struct smu_context *smu); - int (*set_watermarks_for_clock_ranges)(struct smu_context *smu, - struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); int (*conv_power_profile_to_pplib_workload)(int power_profile); uint32_t (*get_fan_control_mode)(struct smu_context *smu); int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); @@ -538,234 +544,90 @@ struct smu_funcs enum smu_baco_state (*baco_get_state)(struct smu_context *smu); int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state); int (*baco_reset)(struct smu_context *smu); + int (*mode2_reset)(struct smu_context *smu); int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); + int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); + int 
(*override_pcie_parameters)(struct smu_context *smu); + uint32_t (*get_pptable_power_limit)(struct smu_context *smu); }; -#define smu_init_microcode(smu) \ - ((smu)->funcs->init_microcode ? (smu)->funcs->init_microcode((smu)) : 0) -#define smu_init_smc_tables(smu) \ - ((smu)->funcs->init_smc_tables ? (smu)->funcs->init_smc_tables((smu)) : 0) -#define smu_fini_smc_tables(smu) \ - ((smu)->funcs->fini_smc_tables ? (smu)->funcs->fini_smc_tables((smu)) : 0) -#define smu_init_power(smu) \ - ((smu)->funcs->init_power ? (smu)->funcs->init_power((smu)) : 0) -#define smu_fini_power(smu) \ - ((smu)->funcs->fini_power ? (smu)->funcs->fini_power((smu)) : 0) -#define smu_load_microcode(smu) \ - ((smu)->funcs->load_microcode ? (smu)->funcs->load_microcode((smu)) : 0) -#define smu_check_fw_status(smu) \ - ((smu)->funcs->check_fw_status ? (smu)->funcs->check_fw_status((smu)) : 0) -#define smu_setup_pptable(smu) \ - ((smu)->funcs->setup_pptable ? (smu)->funcs->setup_pptable((smu)) : 0) -#define smu_powergate_sdma(smu, gate) \ - ((smu)->funcs->powergate_sdma ? (smu)->funcs->powergate_sdma((smu), (gate)) : 0) -#define smu_powergate_vcn(smu, gate) \ - ((smu)->funcs->powergate_vcn ? (smu)->funcs->powergate_vcn((smu), (gate)) : 0) -#define smu_set_gfx_cgpg(smu, enabled) \ - ((smu)->funcs->set_gfx_cgpg ? (smu)->funcs->set_gfx_cgpg((smu), (enabled)) : 0) -#define smu_get_vbios_bootup_values(smu) \ - ((smu)->funcs->get_vbios_bootup_values ? (smu)->funcs->get_vbios_bootup_values((smu)) : 0) -#define smu_get_clk_info_from_vbios(smu) \ - ((smu)->funcs->get_clk_info_from_vbios ? (smu)->funcs->get_clk_info_from_vbios((smu)) : 0) -#define smu_check_pptable(smu) \ - ((smu)->funcs->check_pptable ? (smu)->funcs->check_pptable((smu)) : 0) -#define smu_parse_pptable(smu) \ - ((smu)->funcs->parse_pptable ? (smu)->funcs->parse_pptable((smu)) : 0) -#define smu_populate_smc_tables(smu) \ - ((smu)->funcs->populate_smc_tables ? (smu)->funcs->populate_smc_tables((smu)) : 0) -#define smu_check_fw_version(smu) \ - ((smu)->funcs->check_fw_version ? (smu)->funcs->check_fw_version((smu)) : 0) -#define smu_write_pptable(smu) \ - ((smu)->funcs->write_pptable ? (smu)->funcs->write_pptable((smu)) : 0) -#define smu_set_min_dcef_deep_sleep(smu) \ - ((smu)->funcs->set_min_dcef_deep_sleep ? (smu)->funcs->set_min_dcef_deep_sleep((smu)) : 0) -#define smu_set_tool_table_location(smu) \ - ((smu)->funcs->set_tool_table_location ? (smu)->funcs->set_tool_table_location((smu)) : 0) -#define smu_notify_memory_pool_location(smu) \ - ((smu)->funcs->notify_memory_pool_location ? (smu)->funcs->notify_memory_pool_location((smu)) : 0) -#define smu_gfx_off_control(smu, enable) \ - ((smu)->funcs->gfx_off_control ? (smu)->funcs->gfx_off_control((smu), (enable)) : 0) - -#define smu_write_watermarks_table(smu) \ - ((smu)->funcs->write_watermarks_table ? (smu)->funcs->write_watermarks_table((smu)) : 0) -#define smu_set_last_dcef_min_deep_sleep_clk(smu) \ - ((smu)->funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0) -#define smu_system_features_control(smu, en) \ - ((smu)->funcs->system_features_control ? (smu)->funcs->system_features_control((smu), (en)) : 0) -#define smu_init_max_sustainable_clocks(smu) \ - ((smu)->funcs->init_max_sustainable_clocks ? (smu)->funcs->init_max_sustainable_clocks((smu)) : 0) -#define smu_set_default_od_settings(smu, initialize) \ - ((smu)->ppt_funcs->set_default_od_settings ? 
(smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) -#define smu_set_fan_speed_rpm(smu, speed) \ - ((smu)->funcs->set_fan_speed_rpm ? (smu)->funcs->set_fan_speed_rpm((smu), (speed)) : 0) -#define smu_send_smc_msg(smu, msg) \ - ((smu)->funcs->send_smc_msg? (smu)->funcs->send_smc_msg((smu), (msg)) : 0) -#define smu_send_smc_msg_with_param(smu, msg, param) \ - ((smu)->funcs->send_smc_msg_with_param? (smu)->funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0) -#define smu_read_smc_arg(smu, arg) \ - ((smu)->funcs->read_smc_arg? (smu)->funcs->read_smc_arg((smu), (arg)) : 0) -#define smu_alloc_dpm_context(smu) \ - ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0) -#define smu_init_display_count(smu, count) \ - ((smu)->funcs->init_display_count ? (smu)->funcs->init_display_count((smu), (count)) : 0) -#define smu_feature_set_allowed_mask(smu) \ - ((smu)->funcs->set_allowed_mask? (smu)->funcs->set_allowed_mask((smu)) : 0) -#define smu_feature_get_enabled_mask(smu, mask, num) \ - ((smu)->funcs->get_enabled_mask? (smu)->funcs->get_enabled_mask((smu), (mask), (num)) : 0) -#define smu_is_dpm_running(smu) \ - ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0) -#define smu_notify_display_change(smu) \ - ((smu)->funcs->notify_display_change? (smu)->funcs->notify_display_change((smu)) : 0) -#define smu_store_powerplay_table(smu) \ - ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0) -#define smu_check_powerplay_table(smu) \ - ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0) -#define smu_append_powerplay_table(smu) \ - ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0) -#define smu_set_default_dpm_table(smu) \ - ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0) -#define smu_populate_umd_state_clk(smu) \ - ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0) -#define smu_set_default_od8_settings(smu) \ - ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0) -#define smu_get_power_limit(smu, limit, def) \ - ((smu)->ppt_funcs->get_power_limit ? (smu)->ppt_funcs->get_power_limit((smu), (limit), (def)) : 0) -#define smu_set_power_limit(smu, limit) \ - ((smu)->funcs->set_power_limit ? (smu)->funcs->set_power_limit((smu), (limit)) : 0) -#define smu_get_current_clk_freq(smu, clk_id, value) \ - ((smu)->funcs->get_current_clk_freq? (smu)->funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0) -#define smu_print_clk_levels(smu, clk_type, buf) \ - ((smu)->ppt_funcs->print_clk_levels ? (smu)->ppt_funcs->print_clk_levels((smu), (clk_type), (buf)) : 0) -#define smu_force_clk_levels(smu, clk_type, level) \ - ((smu)->ppt_funcs->force_clk_levels ? (smu)->ppt_funcs->force_clk_levels((smu), (clk_type), (level)) : 0) -#define smu_get_od_percentage(smu, type) \ - ((smu)->ppt_funcs->get_od_percentage ? (smu)->ppt_funcs->get_od_percentage((smu), (type)) : 0) -#define smu_set_od_percentage(smu, type, value) \ - ((smu)->ppt_funcs->set_od_percentage ? (smu)->ppt_funcs->set_od_percentage((smu), (type), (value)) : 0) -#define smu_od_edit_dpm_table(smu, type, input, size) \ - ((smu)->ppt_funcs->od_edit_dpm_table ? (smu)->ppt_funcs->od_edit_dpm_table((smu), (type), (input), (size)) : 0) -#define smu_tables_init(smu, tab) \ - ((smu)->ppt_funcs->tables_init ? 
(smu)->ppt_funcs->tables_init((smu), (tab)) : 0) -#define smu_set_thermal_fan_table(smu) \ - ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0) -#define smu_start_thermal_control(smu) \ - ((smu)->funcs->start_thermal_control? (smu)->funcs->start_thermal_control((smu)) : 0) -#define smu_read_sensor(smu, sensor, data, size) \ - ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : 0) -#define smu_smc_read_sensor(smu, sensor, data, size) \ - ((smu)->funcs->read_sensor? (smu)->funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL) -#define smu_get_power_profile_mode(smu, buf) \ - ((smu)->ppt_funcs->get_power_profile_mode ? (smu)->ppt_funcs->get_power_profile_mode((smu), buf) : 0) -#define smu_set_power_profile_mode(smu, param, param_size) \ - ((smu)->ppt_funcs->set_power_profile_mode ? (smu)->ppt_funcs->set_power_profile_mode((smu), (param), (param_size)) : 0) -#define smu_pre_display_config_changed(smu) \ - ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0) -#define smu_display_config_changed(smu) \ - ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0) -#define smu_apply_clocks_adjust_rules(smu) \ - ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0) -#define smu_notify_smc_dispaly_config(smu) \ - ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0) -#define smu_force_dpm_limit_value(smu, highest) \ - ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0) -#define smu_unforce_dpm_levels(smu) \ - ((smu)->ppt_funcs->unforce_dpm_levels ? (smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0) -#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \ - ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0) -#define smu_set_cpu_power_state(smu) \ - ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0) -#define smu_get_fan_control_mode(smu) \ - ((smu)->funcs->get_fan_control_mode ? (smu)->funcs->get_fan_control_mode((smu)) : 0) -#define smu_set_fan_control_mode(smu, value) \ - ((smu)->funcs->set_fan_control_mode ? (smu)->funcs->set_fan_control_mode((smu), (value)) : 0) -#define smu_get_fan_speed_percent(smu, speed) \ - ((smu)->ppt_funcs->get_fan_speed_percent ? (smu)->ppt_funcs->get_fan_speed_percent((smu), (speed)) : 0) -#define smu_set_fan_speed_percent(smu, speed) \ - ((smu)->funcs->set_fan_speed_percent ? (smu)->funcs->set_fan_speed_percent((smu), (speed)) : 0) -#define smu_get_fan_speed_rpm(smu, speed) \ - ((smu)->ppt_funcs->get_fan_speed_rpm ? (smu)->ppt_funcs->get_fan_speed_rpm((smu), (speed)) : 0) - -#define smu_msg_get_index(smu, msg) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) -#define smu_clk_get_index(smu, msg) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL) -#define smu_feature_get_index(smu, msg) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL) -#define smu_table_get_index(smu, tab) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? 
(smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL) -#define smu_power_get_index(smu, src) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL) -#define smu_workload_get_type(smu, profile) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL) -#define smu_run_afll_btc(smu) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_afll_btc? (smu)->ppt_funcs->run_afll_btc((smu)) : 0) : 0) -#define smu_get_allowed_feature_mask(smu, feature_mask, num) \ - ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0) -#define smu_set_deep_sleep_dcefclk(smu, clk) \ - ((smu)->funcs->set_deep_sleep_dcefclk ? (smu)->funcs->set_deep_sleep_dcefclk((smu), (clk)) : 0) -#define smu_set_active_display_count(smu, count) \ - ((smu)->funcs->set_active_display_count ? (smu)->funcs->set_active_display_count((smu), (count)) : 0) -#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \ - ((smu)->funcs->store_cc6_data ? (smu)->funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0) -#define smu_get_clock_by_type(smu, type, clocks) \ - ((smu)->funcs->get_clock_by_type ? (smu)->funcs->get_clock_by_type((smu), (type), (clocks)) : 0) -#define smu_get_max_high_clocks(smu, clocks) \ - ((smu)->funcs->get_max_high_clocks ? (smu)->funcs->get_max_high_clocks((smu), (clocks)) : 0) -#define smu_get_clock_by_type_with_latency(smu, clk_type, clocks) \ - ((smu)->ppt_funcs->get_clock_by_type_with_latency ? (smu)->ppt_funcs->get_clock_by_type_with_latency((smu), (clk_type), (clocks)) : 0) -#define smu_get_clock_by_type_with_voltage(smu, type, clocks) \ - ((smu)->ppt_funcs->get_clock_by_type_with_voltage ? (smu)->ppt_funcs->get_clock_by_type_with_voltage((smu), (type), (clocks)) : 0) -#define smu_display_clock_voltage_request(smu, clock_req) \ - ((smu)->funcs->display_clock_voltage_request ? (smu)->funcs->display_clock_voltage_request((smu), (clock_req)) : 0) -#define smu_display_disable_memory_clock_switch(smu, disable_memory_clock_switch) \ - ((smu)->ppt_funcs->display_disable_memory_clock_switch ? (smu)->ppt_funcs->display_disable_memory_clock_switch((smu), (disable_memory_clock_switch)) : -EINVAL) -#define smu_get_dal_power_level(smu, clocks) \ - ((smu)->funcs->get_dal_power_level ? (smu)->funcs->get_dal_power_level((smu), (clocks)) : 0) -#define smu_get_perf_level(smu, designation, level) \ - ((smu)->funcs->get_perf_level ? (smu)->funcs->get_perf_level((smu), (designation), (level)) : 0) -#define smu_get_current_shallow_sleep_clocks(smu, clocks) \ - ((smu)->funcs->get_current_shallow_sleep_clocks ? (smu)->funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0) -#define smu_notify_smu_enable_pwe(smu) \ - ((smu)->funcs->notify_smu_enable_pwe ? (smu)->funcs->notify_smu_enable_pwe((smu)) : 0) -#define smu_set_watermarks_for_clock_ranges(smu, clock_ranges) \ - ((smu)->funcs->set_watermarks_for_clock_ranges ? (smu)->funcs->set_watermarks_for_clock_ranges((smu), (clock_ranges)) : 0) -#define smu_dpm_set_uvd_enable(smu, enable) \ - ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0) -#define smu_dpm_set_vce_enable(smu, enable) \ - ((smu)->ppt_funcs->dpm_set_vce_enable ? 
(smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0) -#define smu_set_xgmi_pstate(smu, pstate) \ - ((smu)->funcs->set_xgmi_pstate ? (smu)->funcs->set_xgmi_pstate((smu), (pstate)) : 0) -#define smu_set_watermarks_table(smu, tab, clock_ranges) \ - ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0) -#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \ - ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0) -#define smu_thermal_temperature_range_update(smu, range, rw) \ - ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0) -#define smu_get_thermal_temperature_range(smu, range) \ - ((smu)->ppt_funcs->get_thermal_temperature_range? (smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0) -#define smu_register_irq_handler(smu) \ - ((smu)->funcs->register_irq_handler ? (smu)->funcs->register_irq_handler(smu) : 0) -#define smu_set_azalia_d3_pme(smu) \ - ((smu)->funcs->set_azalia_d3_pme ? (smu)->funcs->set_azalia_d3_pme((smu)) : 0) -#define smu_get_dpm_ultimate_freq(smu, param, min, max) \ - ((smu)->funcs->get_dpm_ultimate_freq ? (smu)->funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0) -#define smu_get_max_sustainable_clocks_by_dc(smu, max_clocks) \ - ((smu)->funcs->get_max_sustainable_clocks_by_dc ? (smu)->funcs->get_max_sustainable_clocks_by_dc((smu), (max_clocks)) : 0) -#define smu_get_uclk_dpm_states(smu, clocks_in_khz, num_states) \ - ((smu)->ppt_funcs->get_uclk_dpm_states ? (smu)->ppt_funcs->get_uclk_dpm_states((smu), (clocks_in_khz), (num_states)) : 0) -#define smu_baco_is_support(smu) \ - ((smu)->funcs->baco_is_support? (smu)->funcs->baco_is_support((smu)) : false) -#define smu_baco_get_state(smu, state) \ - ((smu)->funcs->baco_get_state? (smu)->funcs->baco_get_state((smu), (state)) : 0) -#define smu_baco_reset(smu) \ - ((smu)->funcs->baco_reset? (smu)->funcs->baco_reset((smu)) : 0) -#define smu_asic_set_performance_level(smu, level) \ - ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL); -#define smu_dump_pptable(smu) \ - ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0) -#define smu_get_dpm_uclk_limited(smu, clock, max) \ - ((smu)->ppt_funcs->get_dpm_uclk_limited ? (smu)->ppt_funcs->get_dpm_uclk_limited((smu), (clock), (max)) : -EINVAL) +int smu_load_microcode(struct smu_context *smu); + +int smu_check_fw_status(struct smu_context *smu); + +int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); + +#define smu_i2c_eeprom_init(smu, control) \ + ((smu)->ppt_funcs->i2c_eeprom_init ? (smu)->ppt_funcs->i2c_eeprom_init((control)) : -EINVAL) +#define smu_i2c_eeprom_fini(smu, control) \ + ((smu)->ppt_funcs->i2c_eeprom_fini ? 
(smu)->ppt_funcs->i2c_eeprom_fini((control)) : -EINVAL) + +int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed); + +int smu_get_power_limit(struct smu_context *smu, + uint32_t *limit, + bool def, + bool lock_needed); + +int smu_set_power_limit(struct smu_context *smu, uint32_t limit); +int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf); +int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type); +int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value); + +int smu_od_edit_dpm_table(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size); + +int smu_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size); +int smu_get_power_profile_mode(struct smu_context *smu, char *buf); + +int smu_set_power_profile_mode(struct smu_context *smu, + long *param, + uint32_t param_size, + bool lock_needed); +int smu_get_fan_control_mode(struct smu_context *smu); +int smu_set_fan_control_mode(struct smu_context *smu, int value); +int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed); +int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed); +int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed); + +int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk); +int smu_set_active_display_count(struct smu_context *smu, uint32_t count); + +int smu_get_clock_by_type(struct smu_context *smu, + enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks); + +int smu_get_max_high_clocks(struct smu_context *smu, + struct amd_pp_simple_clock_info *clocks); + +int smu_get_clock_by_type_with_latency(struct smu_context *smu, + enum smu_clk_type clk_type, + struct pp_clock_levels_with_latency *clocks); + +int smu_get_clock_by_type_with_voltage(struct smu_context *smu, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks); + +int smu_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request *clock_req); +int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch); +int smu_notify_smu_enable_pwe(struct smu_context *smu); + +int smu_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate); + +int smu_set_azalia_d3_pme(struct smu_context *smu); + +bool smu_baco_is_support(struct smu_context *smu); + +int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state); + +int smu_baco_reset(struct smu_context *smu); +int smu_mode2_reset(struct smu_context *smu); extern int smu_get_atom_data_table(struct smu_context *smu, uint32_t table, uint16_t *size, uint8_t *frev, uint8_t *crev, @@ -799,6 +661,10 @@ int smu_sys_get_pp_table(struct smu_context *smu, void **table); int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size); int smu_get_power_num_states(struct smu_context *smu, struct pp_states_info *state_info); enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu); +int smu_write_watermarks_table(struct smu_context *smu); +int smu_set_watermarks_for_clock_ranges( + struct smu_context *smu, + struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges); /* smu to display interface */ extern int smu_display_configuration_change(struct smu_context *smu, const @@ -809,7 +675,8 @@ extern int smu_get_current_clocks(struct smu_context *smu, extern int smu_dpm_set_power_gate(struct smu_context *smu,uint32_t block_type, bool gate); extern int smu_handle_task(struct 
smu_context *smu, enum amd_dpm_forced_level level, - enum amd_pp_task task_id); + enum amd_pp_task task_id, + bool lock_needed); int smu_switch_power_profile(struct smu_context *smu, enum PP_SMC_POWER_PROFILE type, bool en); @@ -819,7 +686,7 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *value); int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t *min, uint32_t *max); + uint32_t *min, uint32_t *max, bool lock_needed); int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, @@ -828,10 +695,29 @@ enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu); int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level); int smu_set_display_count(struct smu_context *smu, uint32_t count); bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type); -int smu_feature_update_enable_state(struct smu_context *smu, uint64_t feature_mask, bool enabled); const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type); const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature); size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf); int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask); +int smu_force_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t mask, + bool lock_needed); +int smu_set_mp1_state(struct smu_context *smu, + enum pp_mp1_state mp1_state); +int smu_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state); + +int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks); + +int smu_get_uclk_dpm_states(struct smu_context *smu, + unsigned int *clock_values_in_khz, + unsigned int *num_states); + +int smu_get_dpm_clock_table(struct smu_context *smu, + struct dpm_clocks *clock_table); + +uint32_t smu_get_pptable_power_limit(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h index 78e5927b7711..e3291259b249 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h @@ -95,8 +95,7 @@ //BTC #define PPSMC_MSG_RunAfllBtc 0x30 -#define PPSMC_MSG_RunGfxDcBtc 0x31 -#define PPSMC_MSG_RunSocDcBtc 0x32 +#define PPSMC_MSG_RunDcBtc 0x31 //Debug #define PPSMC_MSG_DramLogSetDramAddrHigh 0x33 diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 7bf9a14bfa0b..af977675fd33 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -355,6 +355,10 @@ struct pp_hwmgr_func { int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state); int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode); int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire); + int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state); + int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate); + int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr, + bool disable); }; struct pp_table_func { diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h 
b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h index e02950b505fa..a886f0644d24 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_arcturus.h @@ -137,29 +137,29 @@ #define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT ) #define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) #define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) -#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) +#define FEATURE_DS_UCLK_MASK (1 << FEATURE_DS_UCLK_BIT ) #define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT ) -#define FEATURE_VCN_PG_MASK (1 << FEATURE_VCN_PG_BIT ) +#define FEATURE_DPM_VCN_MASK (1 << FEATURE_DPM_VCN_BIT ) #define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT ) #define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT ) #define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT ) #define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT ) -#define FEATURE_APCC_MASK (1 << FEATURE_APCC_BIT ) +#define FEATURE_APCC_PLUS_MASK (1 << FEATURE_APCC_PLUS_BIT ) #define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT ) #define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT ) #define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) #define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) #define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT ) -#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << EATURE_OUT_OF_BAND_MONITOR_BIT ) -#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_MASK ) +#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT ) +#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT ) //FIXME need updating // Debug Overrides Bitmask #define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000001 -#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCN_FCLK 0x00000002 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000002 // I2C Config Bit Defines #define I2C_CONTROLLER_ENABLED 1 @@ -423,18 +423,30 @@ typedef enum { } PwrConfig_e; typedef enum { - XGMI_LINK_RATE_12 = 0, // 12Gbps - XGMI_LINK_RATE_16, // 16Gbps - XGMI_LINK_RATE_22, // 22Gbps - XGMI_LINK_RATE_25, // 25Gbps + XGMI_LINK_RATE_2 = 2, // 2Gbps + XGMI_LINK_RATE_4 = 4, // 4Gbps + XGMI_LINK_RATE_8 = 8, // 8Gbps + XGMI_LINK_RATE_12 = 12, // 12Gbps + XGMI_LINK_RATE_16 = 16, // 16Gbps + XGMI_LINK_RATE_17 = 17, // 17Gbps + XGMI_LINK_RATE_18 = 18, // 18Gbps + XGMI_LINK_RATE_19 = 19, // 19Gbps + XGMI_LINK_RATE_20 = 20, // 20Gbps + XGMI_LINK_RATE_21 = 21, // 21Gbps + XGMI_LINK_RATE_22 = 22, // 22Gbps + XGMI_LINK_RATE_23 = 23, // 23Gbps + XGMI_LINK_RATE_24 = 24, // 24Gbps + XGMI_LINK_RATE_25 = 25, // 25Gbps XGMI_LINK_RATE_COUNT } XGMI_LINK_RATE_e; typedef enum { - XGMI_LINK_WIDTH_2 = 0, // x2 - XGMI_LINK_WIDTH_4, // x4 - XGMI_LINK_WIDTH_8, // x8 - XGMI_LINK_WIDTH_16, // x16 + XGMI_LINK_WIDTH_1 = 1, // x1 + XGMI_LINK_WIDTH_2 = 2, // x2 + XGMI_LINK_WIDTH_4 = 4, // x4 + XGMI_LINK_WIDTH_8 = 8, // x8 + XGMI_LINK_WIDTH_9 = 9, // x9 + XGMI_LINK_WIDTH_16 = 16, // x16 XGMI_LINK_WIDTH_COUNT } XGMI_LINK_WIDTH_e; @@ -696,7 +708,11 @@ typedef struct { uint8_t GpioI2cSda; // Serial Data uint16_t GpioPadding; - uint32_t BoardReserved[9]; + // Platform input telemetry voltage coefficient + uint32_t BoardVoltageCoeffA; // decode by /1000 + uint32_t BoardVoltageCoeffB; // decode by /1000 + + uint32_t BoardReserved[7]; // Padding for MMHUB - do not modify this uint32_t MmHubPadding[8]; // SMU internal use @@ -802,7 +818,7 @@ typedef struct { uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units - uint32_t 
EnabledAvfsModules[2]; + uint32_t EnabledAvfsModules[3]; uint32_t MmHubPadding[8]; // SMU internal use } AvfsFuseOverride_t; @@ -865,7 +881,8 @@ typedef struct { //#define TABLE_ACTIVITY_MONITOR_COEFF 7 #define TABLE_OVERDRIVE 7 #define TABLE_WAFL_XGMI_TOPOLOGY 8 -#define TABLE_COUNT 9 +#define TABLE_I2C_COMMANDS 9 +#define TABLE_COUNT 10 // These defines are used with the SMC_MSG_SetUclkFastSwitch message. typedef enum { diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h index b0dd05d431dd..d8c9b7f91fcc 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_types.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_types.h @@ -114,6 +114,7 @@ __SMU_DUMMY_MAP(PowerDownJpeg), \ __SMU_DUMMY_MAP(BacoAudioD3PME), \ __SMU_DUMMY_MAP(ArmD3), \ + __SMU_DUMMY_MAP(RunDcBtc), \ __SMU_DUMMY_MAP(RunGfxDcBtc), \ __SMU_DUMMY_MAP(RunSocDcBtc), \ __SMU_DUMMY_MAP(SetMemoryChannelEnable), \ @@ -168,6 +169,7 @@ __SMU_DUMMY_MAP(PowerGateAtHub), \ __SMU_DUMMY_MAP(SetSoftMinJpeg), \ __SMU_DUMMY_MAP(SetHardMinFclkByFreq), \ + __SMU_DUMMY_MAP(DFCstateControl), \ #undef __SMU_DUMMY_MAP #define __SMU_DUMMY_MAP(type) SMU_MSG_##type @@ -251,6 +253,7 @@ enum smu_clk_type { __SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \ __SMU_DUMMY_MAP(MMHUB_PG), \ __SMU_DUMMY_MAP(ATHUB_PG), \ + __SMU_DUMMY_MAP(APCC_DFLL), \ __SMU_DUMMY_MAP(WAFL_CG), #undef __SMU_DUMMY_MAP diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h index 5bda8539447a..606149085683 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h @@ -27,7 +27,7 @@ #define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU11_DRIVER_IF_VERSION_VG20 0x13 -#define SMU11_DRIVER_IF_VERSION_ARCT 0x09 +#define SMU11_DRIVER_IF_VERSION_ARCT 0x10 #define SMU11_DRIVER_IF_VERSION_NV10 0x33 #define SMU11_DRIVER_IF_VERSION_NV14 0x34 @@ -48,6 +48,8 @@ #define SMU11_TOOL_SIZE 0x19000 +#define MAX_PCIE_CONF 2 + #define CLK_MAP(clk, index) \ [SMU_##clk] = {1, (index)} @@ -88,6 +90,11 @@ struct smu_11_0_dpm_table { uint32_t max; /* MHz */ }; +struct smu_11_0_pcie_table { + uint8_t pcie_gen[MAX_PCIE_CONF]; + uint8_t pcie_lane[MAX_PCIE_CONF]; +}; + struct smu_11_0_dpm_tables { struct smu_11_0_dpm_table soc_table; struct smu_11_0_dpm_table gfx_table; @@ -100,6 +107,7 @@ struct smu_11_0_dpm_tables { struct smu_11_0_dpm_table display_table; struct smu_11_0_dpm_table phy_table; struct smu_11_0_dpm_table fclk_table; + struct smu_11_0_pcie_table pcie_table; }; struct smu_11_0_dpm_context { @@ -130,6 +138,128 @@ enum smu_v11_0_baco_seq { BACO_SEQ_COUNT, }; -void smu_v11_0_set_smu_funcs(struct smu_context *smu); +int smu_v11_0_init_microcode(struct smu_context *smu); + +int smu_v11_0_load_microcode(struct smu_context *smu); + +int smu_v11_0_init_smc_tables(struct smu_context *smu); + +int smu_v11_0_fini_smc_tables(struct smu_context *smu); + +int smu_v11_0_init_power(struct smu_context *smu); + +int smu_v11_0_fini_power(struct smu_context *smu); + +int smu_v11_0_check_fw_status(struct smu_context *smu); + +int smu_v11_0_setup_pptable(struct smu_context *smu); + +int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu); + +int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu); + +int smu_v11_0_check_pptable(struct smu_context *smu); + +int smu_v11_0_parse_pptable(struct smu_context *smu); + +int smu_v11_0_populate_smc_pptable(struct smu_context *smu); + +int smu_v11_0_check_fw_version(struct smu_context *smu); + +int 
smu_v11_0_write_pptable(struct smu_context *smu); + +int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu); + +int smu_v11_0_set_tool_table_location(struct smu_context *smu); + +int smu_v11_0_notify_memory_pool_location(struct smu_context *smu); + +int smu_v11_0_system_features_control(struct smu_context *smu, + bool en); + +int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg); + +int +smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, + uint32_t param); + +int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg); + +int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count); + +int smu_v11_0_set_allowed_mask(struct smu_context *smu); + +int smu_v11_0_get_enabled_mask(struct smu_context *smu, + uint32_t *feature_mask, uint32_t num); + +int smu_v11_0_notify_display_change(struct smu_context *smu); + +int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n); + +int smu_v11_0_get_current_clk_freq(struct smu_context *smu, + enum smu_clk_type clk_id, + uint32_t *value); + +int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu); + +int smu_v11_0_start_thermal_control(struct smu_context *smu); + +int smu_v11_0_stop_thermal_control(struct smu_context *smu); + +int smu_v11_0_read_sensor(struct smu_context *smu, + enum amd_pp_sensors sensor, + void *data, uint32_t *size); + +int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk); + +int +smu_v11_0_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request + *clock_req); + +uint32_t +smu_v11_0_get_fan_control_mode(struct smu_context *smu); + +int +smu_v11_0_set_fan_control_mode(struct smu_context *smu, + uint32_t mode); + +int +smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed); + +int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, + uint32_t speed); + +int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate); + +int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable); + +int smu_v11_0_register_irq_handler(struct smu_context *smu); + +int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu); + +int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks); + +bool smu_v11_0_baco_is_support(struct smu_context *smu); + +enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu); + +int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state); + +int smu_v11_0_baco_reset(struct smu_context *smu); + +int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max); + +int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); + +int smu_v11_0_override_pcie_parameters(struct smu_context *smu); + +int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size); + +uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h index 86cdc3393eac..b2f96a101124 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h @@ -141,7 +141,9 @@ struct smu_11_0_powerplay_table struct smu_11_0_power_saving_clock_table power_saving_clock; struct smu_11_0_overdrive_table overdrive_table; +#ifndef SMU_11_0_PARTIAL_PPTABLE PPTable_t 
smc_pptable; //PPTable_t in smu11_driver_if.h +#endif } __attribute__((packed)); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h index acf3db12f59f..9b9f5df0911c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h @@ -37,6 +37,45 @@ struct smu_12_0_cmn2aisc_mapping { int map_to; }; -void smu_v12_0_set_smu_funcs(struct smu_context *smu); +int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, + uint16_t msg); + +int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg); + +int smu_v12_0_wait_for_response(struct smu_context *smu); + +int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg); + +int +smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, + uint32_t param); + +int smu_v12_0_check_fw_status(struct smu_context *smu); + +int smu_v12_0_check_fw_version(struct smu_context *smu); + +int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate); + +int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate); + +int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable); + +uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu); + +int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable); + +int smu_v12_0_init_smc_tables(struct smu_context *smu); + +int smu_v12_0_fini_smc_tables(struct smu_context *smu); + +int smu_v12_0_populate_smc_tables(struct smu_context *smu); + +int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max); + +int smu_v12_0_mode2_reset(struct smu_context *smu); + +int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h index a0883038f3c3..0c66f0fe1aaf 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega20_ppsmc.h @@ -120,7 +120,8 @@ #define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D #define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F #define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60 -#define PPSMC_Message_Count 0x61 +#define PPSMC_MSG_DFCstateControl 0x63 +#define PPSMC_Message_Count 0x64 typedef uint32_t PPSMC_Result; typedef uint32_t PPSMC_Msg; diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c index 328e258a6895..aaec884d63ed 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c @@ -26,6 +26,7 @@ #include <linux/pci.h> #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" @@ -35,6 +36,7 @@ #include "navi10_ppt.h" #include "smu_v11_0_pptable.h" #include "smu_v11_0_ppsmc.h" +#include "nbio/nbio_7_4_sh_mask.h" #include "asic_reg/mp/mp_11_0_sh_mask.h" @@ -177,6 +179,7 @@ static struct smu_11_0_cmn2aisc_mapping navi10_feature_mask_map[SMU_FEATURE_COUN FEA_MAP(TEMP_DEPENDENT_VMIN), FEA_MAP(MMHUB_PG), FEA_MAP(ATHUB_PG), + FEA_MAP(APCC_DFLL), }; static struct smu_11_0_cmn2aisc_mapping navi10_table_map[SMU_TABLE_COUNT] = { @@ -327,40 +330,52 @@ navi10_get_allowed_feature_mask(struct smu_context *smu, memset(feature_mask, 0, sizeof(uint32_t) * num); *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) - | FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT) - | FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT) | 
FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT) - | FEATURE_MASK(FEATURE_DPM_LINK_BIT) - | FEATURE_MASK(FEATURE_GFX_ULV_BIT) | FEATURE_MASK(FEATURE_RSMU_SMN_CG_BIT) | FEATURE_MASK(FEATURE_DS_SOCCLK_BIT) | FEATURE_MASK(FEATURE_PPT_BIT) | FEATURE_MASK(FEATURE_TDC_BIT) | FEATURE_MASK(FEATURE_GFX_EDC_BIT) + | FEATURE_MASK(FEATURE_APCC_PLUS_BIT) | FEATURE_MASK(FEATURE_VR0HOT_BIT) | FEATURE_MASK(FEATURE_FAN_CONTROL_BIT) | FEATURE_MASK(FEATURE_THERMAL_BIT) | FEATURE_MASK(FEATURE_LED_DISPLAY_BIT) - | FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT) - | FEATURE_MASK(FEATURE_DS_GFXCLK_BIT) + | FEATURE_MASK(FEATURE_DS_LCLK_BIT) | FEATURE_MASK(FEATURE_DS_DCEFCLK_BIT) | FEATURE_MASK(FEATURE_FW_DSTATE_BIT) | FEATURE_MASK(FEATURE_BACO_BIT) | FEATURE_MASK(FEATURE_ACDC_BIT) | FEATURE_MASK(FEATURE_GFX_SS_BIT) | FEATURE_MASK(FEATURE_APCC_DFLL_BIT) - | FEATURE_MASK(FEATURE_FW_CTF_BIT); + | FEATURE_MASK(FEATURE_FW_CTF_BIT) + | FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT); + + if (adev->pm.pp_feature & PP_SOCCLK_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_SOCCLK_BIT); + + if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT); + + if (adev->pm.pp_feature & PP_PCIE_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_LINK_BIT); + + if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCEFCLK_BIT); if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT) | FEATURE_MASK(FEATURE_MEM_VDDCI_SCALING_BIT) | FEATURE_MASK(FEATURE_MEM_MVDD_SCALING_BIT); - if (adev->pm.pp_feature & PP_GFXOFF_MASK) { + if (adev->pm.pp_feature & PP_ULV_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT); + + if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) + *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT); + + if (adev->pm.pp_feature & PP_GFXOFF_MASK) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT); - /* TODO: remove it once fw fix the bug */ - *(uint64_t *)feature_mask &= ~FEATURE_MASK(FEATURE_FW_DSTATE_BIT); - } if (smu->adev->pg_flags & AMD_PG_SUPPORT_MMHUB) *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MMHUB_PG_BIT); @@ -585,6 +600,7 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) struct smu_table_context *table_context = &smu->smu_table; struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context; PPTable_t *driver_ppt = NULL; + int i; driver_ppt = table_context->driver_pptable; @@ -615,6 +631,11 @@ static int navi10_set_default_dpm_table(struct smu_context *smu) dpm_context->dpm_tables.phy_table.min = driver_ppt->FreqTablePhyclk[0]; dpm_context->dpm_tables.phy_table.max = driver_ppt->FreqTablePhyclk[NUM_PHYCLK_DPM_LEVELS - 1]; + for (i = 0; i < MAX_PCIE_CONF; i++) { + dpm_context->dpm_tables.pcie_table.pcie_gen[i] = driver_ppt->PcieGenSpeed[i]; + dpm_context->dpm_tables.pcie_table.pcie_lane[i] = driver_ppt->PcieLaneCount[i]; + } + return 0; } @@ -677,13 +698,29 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu return dpm_desc->SnapToDiscrete == 0 ? 
true : false; } +static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature) +{ + return od_table->cap[feature]; +} + + static int navi10_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf) { + uint16_t *curve_settings; int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0; uint32_t freq_values[3] = {0}; uint32_t mark_index = 0; + struct smu_table_context *table_context = &smu->smu_table; + uint32_t gen_speed, lane_width; + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context; + struct amdgpu_device *adev = smu->adev; + PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable; + OverDriveTable_t *od_table = + (OverDriveTable_t *)table_context->overdrive_table; + struct smu_11_0_overdrive_table *od_settings = smu->od_settings; switch (clk_type) { case SMU_GFXCLK: @@ -734,6 +771,69 @@ static int navi10_print_clk_levels(struct smu_context *smu, } break; + case SMU_PCIE: + gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) & + PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK) + >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT; + lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) & + PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK) + >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT; + for (i = 0; i < NUM_LINK_LEVELS; i++) + size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i, + (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," : + (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," : + (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," : + (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "", + (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" : + (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" : + (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" : + (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" : + (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" : + (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "", + pptable->LclkFreq[i], + (gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) && + (lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ? 
+ "*" : ""); + break; + case SMU_OD_SCLK: + if (!smu->od_enabled || !od_table || !od_settings) + break; + if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) + break; + size += sprintf(buf + size, "OD_SCLK:\n"); + size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax); + break; + case SMU_OD_MCLK: + if (!smu->od_enabled || !od_table || !od_settings) + break; + if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) + break; + size += sprintf(buf + size, "OD_MCLK:\n"); + size += sprintf(buf + size, "0: %uMHz\n", od_table->UclkFmax); + break; + case SMU_OD_VDDC_CURVE: + if (!smu->od_enabled || !od_table || !od_settings) + break; + if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) + break; + size += sprintf(buf + size, "OD_VDDC_CURVE:\n"); + for (i = 0; i < 3; i++) { + switch (i) { + case 0: + curve_settings = &od_table->GfxclkFreq1; + break; + case 1: + curve_settings = &od_table->GfxclkFreq2; + break; + case 2: + curve_settings = &od_table->GfxclkFreq3; + break; + default: + break; + } + size += sprintf(buf + size, "%d: %uMHz @ %umV\n", i, curve_settings[0], curve_settings[1] / NAVI10_VOLTAGE_SCALE); + } + break; default: break; } @@ -789,13 +889,13 @@ static int navi10_populate_umd_state_clk(struct smu_context *smu) int ret = 0; uint32_t min_sclk_freq = 0, min_mclk_freq = 0; - ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL); + ret = smu_get_dpm_freq_range(smu, SMU_SCLK, &min_sclk_freq, NULL, false); if (ret) return ret; smu->pstate_sclk = min_sclk_freq * 100; - ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL); + ret = smu_get_dpm_freq_range(smu, SMU_MCLK, &min_mclk_freq, NULL, false); if (ret) return ret; @@ -848,7 +948,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu) return ret; if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) { - ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq); + ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &max_freq, false); if (ret) return ret; ret = smu_set_hard_freq_range(smu, SMU_UCLK, 0, max_freq); @@ -898,7 +998,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest) for (i = 0; i < ARRAY_SIZE(clks); i++) { clk_type = clks[i]; - ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq); + ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); if (ret) return ret; @@ -925,7 +1025,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu) for (i = 0; i < ARRAY_SIZE(clks); i++) { clk_type = clks[i]; - ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq); + ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); if (ret) return ret; @@ -1260,7 +1360,9 @@ static int navi10_notify_smc_dispaly_config(struct smu_context *smu) if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10; - if (!smu_display_clock_voltage_request(smu, &clock_req)) { + + ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req); + if (!ret) { if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetMinDeepSleepDcefclk, @@ -1414,7 +1516,7 @@ static int navi10_read_sensor(struct smu_context *smu, *size = 4; break; default: - ret = smu_smc_read_sensor(smu, sensor, data, size); + ret = smu_v11_0_read_sensor(smu, sensor, data, 
size); } mutex_unlock(&smu->sensor_lock); @@ -1457,18 +1559,47 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu) uint32_t sclk_freq = 0, uclk_freq = 0; uint32_t uclk_level = 0; - switch (adev->pdev->revision) { - case 0xf0: /* XTX */ - case 0xc0: - sclk_freq = NAVI10_PEAK_SCLK_XTX; - break; - case 0xf1: /* XT */ - case 0xc1: - sclk_freq = NAVI10_PEAK_SCLK_XT; + switch (adev->asic_type) { + case CHIP_NAVI10: + switch (adev->pdev->revision) { + case 0xf0: /* XTX */ + case 0xc0: + sclk_freq = NAVI10_PEAK_SCLK_XTX; + break; + case 0xf1: /* XT */ + case 0xc1: + sclk_freq = NAVI10_PEAK_SCLK_XT; + break; + default: /* XL */ + sclk_freq = NAVI10_PEAK_SCLK_XL; + break; + } break; - default: /* XL */ - sclk_freq = NAVI10_PEAK_SCLK_XL; + case CHIP_NAVI14: + switch (adev->pdev->revision) { + case 0xc7: /* XT */ + case 0xf4: + sclk_freq = NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK; + break; + case 0xc1: /* XTM */ + case 0xf2: + sclk_freq = NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK; + break; + case 0xc3: /* XLM */ + case 0xf3: + sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK; + break; + case 0xc5: /* XTX */ + case 0xf6: + sclk_freq = NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK; + break; + default: /* XL */ + sclk_freq = NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK; + break; + } break; + default: + return -EINVAL; } ret = smu_get_dpm_level_count(smu, SMU_UCLK, &uclk_level); @@ -1491,10 +1622,6 @@ static int navi10_set_peak_clock_by_device(struct smu_context *smu) static int navi10_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) { int ret = 0; - struct amdgpu_device *adev = smu->adev; - - if (adev->asic_type != CHIP_NAVI10) - return -EINVAL; switch (level) { case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: @@ -1547,17 +1674,22 @@ static int navi10_display_disable_memory_clock_switch(struct smu_context *smu, return ret; } +static uint32_t navi10_get_pptable_power_limit(struct smu_context *smu) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; +} + static int navi10_get_power_limit(struct smu_context *smu, uint32_t *limit, - bool asic_default) + bool cap) { PPTable_t *pptable = smu->smu_table.driver_pptable; uint32_t asic_default_power_limit = 0; int ret = 0; int power_src; - if (!smu->default_power_limit || - !smu->power_limit) { + if (!smu->power_limit) { if (smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) { power_src = smu_power_get_index(smu, SMU_POWER_SOURCE_AC); if (power_src < 0) @@ -1580,23 +1712,291 @@ static int navi10_get_power_limit(struct smu_context *smu, pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0]; } - if (smu->od_enabled) { - asic_default_power_limit *= (100 + smu->smu_table.TDPODLimit); - asic_default_power_limit /= 100; - } - - smu->default_power_limit = asic_default_power_limit; smu->power_limit = asic_default_power_limit; } - if (asic_default) - *limit = smu->default_power_limit; + if (cap) + *limit = smu_v11_0_get_max_power_limit(smu); else *limit = smu->power_limit; return 0; } +static int navi10_update_pcie_parameters(struct smu_context *smu, + uint32_t pcie_gen_cap, + uint32_t pcie_width_cap) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + int ret, i; + uint32_t smu_pcie_arg; + + struct smu_dpm_context *smu_dpm = &smu->smu_dpm; + struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context; + + for (i = 0; i < NUM_LINK_LEVELS; i++) { + smu_pcie_arg = (i << 16) | + ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? 
(pptable->PcieGenSpeed[i] << 8) : + (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ? + pptable->PcieLaneCount[i] : pcie_width_cap); + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg); + + if (ret) + return ret; + + if (pptable->PcieGenSpeed[i] > pcie_gen_cap) + dpm_context->dpm_tables.pcie_table.pcie_gen[i] = pcie_gen_cap; + if (pptable->PcieLaneCount[i] > pcie_width_cap) + dpm_context->dpm_tables.pcie_table.pcie_lane[i] = pcie_width_cap; + } + + return 0; +} + +static inline void navi10_dump_od_table(OverDriveTable_t *od_table) { + pr_debug("OD: Gfxclk: (%d, %d)\n", od_table->GfxclkFmin, od_table->GfxclkFmax); + pr_debug("OD: Gfx1: (%d, %d)\n", od_table->GfxclkFreq1, od_table->GfxclkVolt1); + pr_debug("OD: Gfx2: (%d, %d)\n", od_table->GfxclkFreq2, od_table->GfxclkVolt2); + pr_debug("OD: Gfx3: (%d, %d)\n", od_table->GfxclkFreq3, od_table->GfxclkVolt3); + pr_debug("OD: UclkFmax: %d\n", od_table->UclkFmax); + pr_debug("OD: OverDrivePct: %d\n", od_table->OverDrivePct); +} + +static int navi10_od_setting_check_range(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODSETTING_ID setting, uint32_t value) +{ + if (value < od_table->min[setting]) { + pr_warn("OD setting (%d, %d) is less than the minimum allowed (%d)\n", setting, value, od_table->min[setting]); + return -EINVAL; + } + if (value > od_table->max[setting]) { + pr_warn("OD setting (%d, %d) is greater than the maximum allowed (%d)\n", setting, value, od_table->max[setting]); + return -EINVAL; + } + return 0; +} + +static int navi10_setup_od_limits(struct smu_context *smu) { + struct smu_11_0_overdrive_table *overdrive_table = NULL; + struct smu_11_0_powerplay_table *powerplay_table = NULL; + + if (!smu->smu_table.power_play_table) { + pr_err("powerplay table uninitialized!\n"); + return -ENOENT; + } + powerplay_table = (struct smu_11_0_powerplay_table *)smu->smu_table.power_play_table; + overdrive_table = &powerplay_table->overdrive_table; + if (!smu->od_settings) { + smu->od_settings = kmemdup(overdrive_table, sizeof(struct smu_11_0_overdrive_table), GFP_KERNEL); + } else { + memcpy(smu->od_settings, overdrive_table, sizeof(struct smu_11_0_overdrive_table)); + } + return 0; +} + +static int navi10_set_default_od_settings(struct smu_context *smu, bool initialize) { + OverDriveTable_t *od_table; + int ret = 0; + + ret = smu_v11_0_set_default_od_settings(smu, initialize, sizeof(OverDriveTable_t)); + if (ret) + return ret; + + if (initialize) { + ret = navi10_setup_od_limits(smu); + if (ret) { + pr_err("Failed to retrieve board OD limits\n"); + return ret; + } + + } + + od_table = (OverDriveTable_t *)smu->smu_table.overdrive_table; + if (od_table) { + navi10_dump_od_table(od_table); + } + + return ret; +} + +static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) { + int i; + int ret = 0; + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTable_t *od_table; + struct smu_11_0_overdrive_table *od_settings; + enum SMU_11_0_ODSETTING_ID freq_setting, voltage_setting; + uint16_t *freq_ptr, *voltage_ptr; + od_table = (OverDriveTable_t *)table_context->overdrive_table; + + if (!smu->od_enabled) { + pr_warn("OverDrive is not enabled!\n"); + return -EINVAL; + } + + if (!smu->od_settings) { + pr_err("OD board limits are not set!\n"); + return -ENOENT; + } + + od_settings = smu->od_settings; + + switch (type) { + case PP_OD_EDIT_SCLK_VDDC_TABLE: + if 
(!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) { + pr_warn("GFXCLK_LIMITS not supported!\n"); + return -ENOTSUPP; + } + if (!table_context->overdrive_table) { + pr_err("Overdrive is not initialized\n"); + return -EINVAL; + } + for (i = 0; i < size; i += 2) { + if (i + 2 > size) { + pr_info("invalid number of input parameters %d\n", size); + return -EINVAL; + } + switch (input[i]) { + case 0: + freq_setting = SMU_11_0_ODSETTING_GFXCLKFMIN; + freq_ptr = &od_table->GfxclkFmin; + if (input[i + 1] > od_table->GfxclkFmax) { + pr_info("GfxclkFmin (%ld) must be <= GfxclkFmax (%u)!\n", + input[i + 1], + od_table->GfxclkFmax); + return -EINVAL; + } + break; + case 1: + freq_setting = SMU_11_0_ODSETTING_GFXCLKFMAX; + freq_ptr = &od_table->GfxclkFmax; + if (input[i + 1] < od_table->GfxclkFmin) { + pr_info("GfxclkFmax (%ld) must be >= GfxclkFmin (%u)!\n", + input[i + 1], + od_table->GfxclkFmin); + return -EINVAL; + } + break; + default: + pr_info("Invalid SCLK_VDDC_TABLE index: %ld\n", input[i]); + pr_info("Supported indices: [0:min,1:max]\n"); + return -EINVAL; + } + ret = navi10_od_setting_check_range(od_settings, freq_setting, input[i + 1]); + if (ret) + return ret; + *freq_ptr = input[i + 1]; + } + break; + case PP_OD_EDIT_MCLK_VDDC_TABLE: + if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) { + pr_warn("UCLK_MAX not supported!\n"); + return -ENOTSUPP; + } + if (size < 2) { + pr_info("invalid number of parameters: %d\n", size); + return -EINVAL; + } + if (input[0] != 1) { + pr_info("Invalid MCLK_VDDC_TABLE index: %ld\n", input[0]); + pr_info("Supported indices: [1:max]\n"); + return -EINVAL; + } + ret = navi10_od_setting_check_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX, input[1]); + if (ret) + return ret; + od_table->UclkFmax = input[1]; + break; + case PP_OD_COMMIT_DPM_TABLE: + navi10_dump_od_table(od_table); + ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)od_table, true); + if (ret) { + pr_err("Failed to import overdrive table!\n"); + return ret; + } + // no lock needed because smu_od_edit_dpm_table has it + ret = smu_handle_task(smu, smu->smu_dpm.dpm_level, + AMD_PP_TASK_READJUST_POWER_STATE, + false); + if (ret) { + return ret; + } + break; + case PP_OD_EDIT_VDDC_CURVE: + if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) { + pr_warn("GFXCLK_CURVE not supported!\n"); + return -ENOTSUPP; + } + if (size < 3) { + pr_info("invalid number of parameters: %d\n", size); + return -EINVAL; + } + if (!od_table) { + pr_info("Overdrive is not initialized\n"); + return -EINVAL; + } + + switch (input[0]) { + case 0: + freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1; + voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1; + freq_ptr = &od_table->GfxclkFreq1; + voltage_ptr = &od_table->GfxclkVolt1; + break; + case 1: + freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2; + voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2; + freq_ptr = &od_table->GfxclkFreq2; + voltage_ptr = &od_table->GfxclkVolt2; + break; + case 2: + freq_setting = SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3; + voltage_setting = SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3; + freq_ptr = &od_table->GfxclkFreq3; + voltage_ptr = &od_table->GfxclkVolt3; + break; + default: + pr_info("Invalid VDDC_CURVE index: %ld\n", input[0]); + pr_info("Supported indices: [0, 1, 2]\n"); + return -EINVAL; + } + ret = navi10_od_setting_check_range(od_settings, freq_setting, input[1]); + if (ret) + return ret; + // Allow setting zero to 
disable the OverDrive VDDC curve + if (input[2] != 0) { + ret = navi10_od_setting_check_range(od_settings, voltage_setting, input[2]); + if (ret) + return ret; + *freq_ptr = input[1]; + *voltage_ptr = ((uint16_t)input[2]) * NAVI10_VOLTAGE_SCALE; + pr_debug("OD: set curve %ld: (%d, %d)\n", input[0], *freq_ptr, *voltage_ptr); + } else { + // If setting 0, disable all voltage curve settings + od_table->GfxclkVolt1 = 0; + od_table->GfxclkVolt2 = 0; + od_table->GfxclkVolt3 = 0; + } + navi10_dump_od_table(od_table); + break; + default: + return -ENOSYS; + } + return ret; +} + +static int navi10_run_btc(struct smu_context *smu) +{ + int ret = 0; + + ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc); + if (ret) + pr_err("RunBtc failed!\n"); + + return ret; +} + static const struct pptable_funcs navi10_ppt_funcs = { .tables_init = navi10_tables_init, .alloc_dpm_context = navi10_allocate_dpm_context, @@ -1635,12 +2035,63 @@ static const struct pptable_funcs navi10_ppt_funcs = { .get_thermal_temperature_range = navi10_get_thermal_temperature_range, .display_disable_memory_clock_switch = navi10_display_disable_memory_clock_switch, .get_power_limit = navi10_get_power_limit, + .update_pcie_parameters = navi10_update_pcie_parameters, + .init_microcode = smu_v11_0_init_microcode, + .load_microcode = smu_v11_0_load_microcode, + .init_smc_tables = smu_v11_0_init_smc_tables, + .fini_smc_tables = smu_v11_0_fini_smc_tables, + .init_power = smu_v11_0_init_power, + .fini_power = smu_v11_0_fini_power, + .check_fw_status = smu_v11_0_check_fw_status, + .setup_pptable = smu_v11_0_setup_pptable, + .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, + .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios, + .check_pptable = smu_v11_0_check_pptable, + .parse_pptable = smu_v11_0_parse_pptable, + .populate_smc_tables = smu_v11_0_populate_smc_pptable, + .check_fw_version = smu_v11_0_check_fw_version, + .write_pptable = smu_v11_0_write_pptable, + .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, + .set_tool_table_location = smu_v11_0_set_tool_table_location, + .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, + .system_features_control = smu_v11_0_system_features_control, + .send_smc_msg = smu_v11_0_send_msg, + .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, + .read_smc_arg = smu_v11_0_read_arg, + .init_display_count = smu_v11_0_init_display_count, + .set_allowed_mask = smu_v11_0_set_allowed_mask, + .get_enabled_mask = smu_v11_0_get_enabled_mask, + .notify_display_change = smu_v11_0_notify_display_change, + .set_power_limit = smu_v11_0_set_power_limit, + .get_current_clk_freq = smu_v11_0_get_current_clk_freq, + .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks, + .start_thermal_control = smu_v11_0_start_thermal_control, + .stop_thermal_control = smu_v11_0_stop_thermal_control, + .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, + .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, + .get_fan_control_mode = smu_v11_0_get_fan_control_mode, + .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, + .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, + .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, + .gfx_off_control = smu_v11_0_gfx_off_control, + .register_irq_handler = smu_v11_0_register_irq_handler, + .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, + .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, + .baco_is_support= 
smu_v11_0_baco_is_support, + .baco_get_state = smu_v11_0_baco_get_state, + .baco_set_state = smu_v11_0_baco_set_state, + .baco_reset = smu_v11_0_baco_reset, + .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, + .override_pcie_parameters = smu_v11_0_override_pcie_parameters, + .set_default_od_settings = navi10_set_default_od_settings, + .od_edit_dpm_table = navi10_od_edit_dpm_table, + .get_pptable_power_limit = navi10_get_pptable_power_limit, + .run_btc = navi10_run_btc, }; void navi10_set_ppt_funcs(struct smu_context *smu) { - struct smu_table_context *smu_table = &smu->smu_table; - smu->ppt_funcs = &navi10_ppt_funcs; - smu_table->table_count = TABLE_COUNT; } diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h index 620ff17c2fef..ec03c7992f6d 100644 --- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.h @@ -27,6 +27,17 @@ #define NAVI10_PEAK_SCLK_XT (1755) #define NAVI10_PEAK_SCLK_XL (1625) +#define NAVI14_UMD_PSTATE_PEAK_XT_GFXCLK (1670) +#define NAVI14_UMD_PSTATE_PEAK_XTM_GFXCLK (1448) +#define NAVI14_UMD_PSTATE_PEAK_XLM_GFXCLK (1181) +#define NAVI14_UMD_PSTATE_PEAK_XTX_GFXCLK (1717) +#define NAVI14_UMD_PSTATE_PEAK_XL_GFXCLK (1448) + +#define NAVI10_VOLTAGE_SCALE (4) + +#define smnPCIE_LC_SPEED_CNTL 0x11140290 +#define smnPCIE_LC_LINK_WIDTH_CNTL 0x11140288 + extern void navi10_set_ppt_funcs(struct smu_context *smu); #endif diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index e62bfba51562..04daf7e9fe05 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -23,6 +23,7 @@ #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "soc15_common.h" #include "smu_v12_0_ppsmc.h" #include "smu12_driver_if.h" @@ -160,21 +161,17 @@ static int renoir_tables_init(struct smu_context *smu, struct smu_table *tables) * This interface just for getting uclk ultimate freq and should't introduce * other likewise function result in overmuch callback. 
*/ -static int renoir_get_dpm_uclk_limited(struct smu_context *smu, uint32_t *clock, bool max) +static int renoir_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t dpm_level, uint32_t *freq) { + DpmClocks_t *clk_table = smu->smu_table.clocks_table; - DpmClocks_t *table = smu->smu_table.clocks_table; - - if (!clock || !table) + if (!clk_table || clk_type >= SMU_CLK_COUNT) return -EINVAL; - if (max) - *clock = table->FClocks[NUM_FCLK_DPM_LEVELS-1].Freq; - else - *clock = table->FClocks[0].Freq; + GET_DPM_CUR_FREQ(clk_table, clk_type, dpm_level, *freq); return 0; - } static int renoir_print_clk_levels(struct smu_context *smu, @@ -183,11 +180,13 @@ static int renoir_print_clk_levels(struct smu_context *smu, int i, size = 0, ret = 0; uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0; DpmClocks_t *clk_table = smu->smu_table.clocks_table; - SmuMetrics_t metrics = {0}; + SmuMetrics_t metrics; if (!clk_table || clk_type >= SMU_CLK_COUNT) return -EINVAL; + memset(&metrics, 0, sizeof(metrics)); + ret = smu_update_table(smu, SMU_TABLE_SMU_METRICS, 0, (void *)&metrics, false); if (ret) @@ -198,7 +197,7 @@ static int renoir_print_clk_levels(struct smu_context *smu, case SMU_SCLK: /* parameters retrieved from the metrics table are in MHz */ cur_value = metrics.ClockFrequency[CLOCK_GFXCLK]; - ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max); + ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min, &max, false); if (!ret) { /* the driver only knows min/max gfx_clk; add level 1 for all other gfx clks */ if (cur_value == max) @@ -246,20 +245,474 @@ static int renoir_print_clk_levels(struct smu_context *smu, return size; } +static enum amd_pm_state_type renoir_get_current_power_state(struct smu_context *smu) +{ + enum amd_pm_state_type pm_type; + struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); + + if (!smu_dpm_ctx->dpm_context || + !smu_dpm_ctx->dpm_current_power_state) + return -EINVAL; + + switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) { + case SMU_STATE_UI_LABEL_BATTERY: + pm_type = POWER_STATE_TYPE_BATTERY; + break; + case SMU_STATE_UI_LABEL_BALLANCED: + pm_type = POWER_STATE_TYPE_BALANCED; + break; + case SMU_STATE_UI_LABEL_PERFORMANCE: + pm_type = POWER_STATE_TYPE_PERFORMANCE; + break; + default: + if (smu_dpm_ctx->dpm_current_power_state->classification.flags & SMU_STATE_CLASSIFICATION_FLAG_BOOT) + pm_type = POWER_STATE_TYPE_INTERNAL_BOOT; + else + pm_type = POWER_STATE_TYPE_DEFAULT; + break; + } + + return pm_type; +} + +static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable) +{ + struct smu_power_context *smu_power = &smu->smu_power; + struct smu_power_gate *power_gate = &smu_power->power_gate; + int ret = 0; + + if (enable) { + /* vcn dpm on is a prerequisite for vcn power gate messages */ + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0); + if (ret) + return ret; + } + power_gate->vcn_gated = false; + } else { + if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) { + ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn); + if (ret) + return ret; + } + power_gate->vcn_gated = true; + } + + return ret; +} + +static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest) +{ + int ret = 0, i = 0; + uint32_t min_freq, max_freq, force_freq; + enum smu_clk_type clk_type; + + enum smu_clk_type clks[] = { + SMU_GFXCLK, + SMU_MCLK, + SMU_SOCCLK, + }; + + for (i = 0; i < ARRAY_SIZE(clks); i++) { + clk_type = clks[i]; + ret = 
smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); + if (ret) + return ret; + + force_freq = highest ? max_freq : min_freq; + ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq); + if (ret) + return ret; + } + + return ret; +} + +static int renoir_unforce_dpm_levels(struct smu_context *smu) { + + int ret = 0, i = 0; + uint32_t min_freq, max_freq; + enum smu_clk_type clk_type; + + struct clk_feature_map { + enum smu_clk_type clk_type; + uint32_t feature; + } clk_feature_map[] = { + {SMU_GFXCLK, SMU_FEATURE_DPM_GFXCLK_BIT}, + {SMU_MCLK, SMU_FEATURE_DPM_UCLK_BIT}, + {SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT}, + }; + + for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) { + if (!smu_feature_is_enabled(smu, clk_feature_map[i].feature)) + continue; + + clk_type = clk_feature_map[i].clk_type; + + ret = smu_get_dpm_freq_range(smu, clk_type, &min_freq, &max_freq, false); + if (ret) + return ret; + + ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq); + if (ret) + return ret; + } + + return ret; +} + +static int renoir_get_workload_type(struct smu_context *smu, uint32_t profile) +{ + + uint32_t pplib_workload = 0; + + switch (profile) { + case PP_SMC_POWER_PROFILE_FULLSCREEN3D: + pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT; + break; + case PP_SMC_POWER_PROFILE_CUSTOM: + pplib_workload = WORKLOAD_PPLIB_COUNT; + break; + case PP_SMC_POWER_PROFILE_VIDEO: + pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT; + break; + case PP_SMC_POWER_PROFILE_VR: + pplib_workload = WORKLOAD_PPLIB_VR_BIT; + break; + case PP_SMC_POWER_PROFILE_COMPUTE: + pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT; + break; + default: + return -EINVAL; + } + + return pplib_workload; +} + +static int renoir_get_profiling_clk_mask(struct smu_context *smu, + enum amd_dpm_forced_level level, + uint32_t *sclk_mask, + uint32_t *mclk_mask, + uint32_t *soc_mask) +{ + + if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) { + if (sclk_mask) + *sclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { + if (mclk_mask) + *mclk_mask = 0; + } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { + if(sclk_mask) + /* The sclk as gfxclk and has three level about max/min/current */ + *sclk_mask = 3 - 1; + + if(mclk_mask) + *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1; + + if(soc_mask) + *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1; + } + + return 0; +} + +/** + * This interface get dpm clock table for dc + */ +static int renoir_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table) +{ + DpmClocks_t *table = smu->smu_table.clocks_table; + int i; + + if (!clock_table || !table) + return -EINVAL; + + for (i = 0; i < NUM_DCFCLK_DPM_LEVELS; i++) { + clock_table->DcfClocks[i].Freq = table->DcfClocks[i].Freq; + clock_table->DcfClocks[i].Vol = table->DcfClocks[i].Vol; + } + + for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) { + clock_table->SocClocks[i].Freq = table->SocClocks[i].Freq; + clock_table->SocClocks[i].Vol = table->SocClocks[i].Vol; + } + + for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) { + clock_table->FClocks[i].Freq = table->FClocks[i].Freq; + clock_table->FClocks[i].Vol = table->FClocks[i].Vol; + } + + for (i = 0; i< NUM_MEMCLK_DPM_LEVELS; i++) { + clock_table->MemClocks[i].Freq = table->MemClocks[i].Freq; + clock_table->MemClocks[i].Vol = table->MemClocks[i].Vol; + } + + return 0; +} + +static int renoir_force_clk_levels(struct smu_context *smu, + enum smu_clk_type clk_type, uint32_t mask) +{ + + int ret = 0 ; + uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 
0; + DpmClocks_t *clk_table = smu->smu_table.clocks_table; + + soft_min_level = mask ? (ffs(mask) - 1) : 0; + soft_max_level = mask ? (fls(mask) - 1) : 0; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + if (soft_min_level > 2 || soft_max_level > 2) { + pr_info("Currently sclk only support 3 levels on APU\n"); + return -EINVAL; + } + + ret = smu_get_dpm_freq_range(smu, SMU_GFXCLK, &min_freq, &max_freq, false); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, + soft_max_level == 0 ? min_freq : + soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, + soft_min_level == 2 ? max_freq : + soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq); + if (ret) + return ret; + break; + case SMU_SOCCLK: + GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq); + GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq); + if (ret) + return ret; + break; + case SMU_MCLK: + case SMU_FCLK: + GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq); + GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq); + if (ret) + return ret; + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq); + if (ret) + return ret; + break; + default: + break; + } + + return ret; +} + +static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size) +{ + int workload_type, ret; + uint32_t profile_mode = input[size]; + + if (profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) { + pr_err("Invalid power profile mode %d\n", smu->power_profile_mode); + return -EINVAL; + } + + /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */ + workload_type = smu_workload_get_type(smu, smu->power_profile_mode); + if (workload_type < 0) { + pr_err("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode); + return -EINVAL; + } + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask, + 1 << workload_type); + if (ret) { + pr_err("Fail to set workload type %d\n", workload_type); + return ret; + } + + smu->power_profile_mode = profile_mode; + + return 0; +} + +static int renoir_set_peak_clock_by_device(struct smu_context *smu) +{ + int ret = 0; + uint32_t sclk_freq = 0, uclk_freq = 0; + + ret = smu_get_dpm_freq_range(smu, SMU_SCLK, NULL, &sclk_freq, false); + if (ret) + return ret; + + ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq); + if (ret) + return ret; + + ret = smu_get_dpm_freq_range(smu, SMU_UCLK, NULL, &uclk_freq, false); + if (ret) + return ret; + + ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq); + if (ret) + return ret; + + return ret; +} + +static int renoir_set_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level) +{ + int ret = 0; + + switch (level) { + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + ret = renoir_set_peak_clock_by_device(smu); + break; + default: + ret = -EINVAL; + break; + } + + return ret; +} + +/* save watermark settings into pplib smu structure, + * also pass data to smu controller + */ +static int renoir_set_watermarks_table( + struct smu_context *smu, + void *watermarks, + struct dm_pp_wm_sets_with_clock_ranges_soc15 
*clock_ranges) +{ + int i; + int ret = 0; + Watermarks_t *table = watermarks; + + if (!table || !clock_ranges) + return -EINVAL; + + if (clock_ranges->num_wm_dmif_sets > 4 || + clock_ranges->num_wm_mcif_sets > 4) + return -EINVAL; + + /* save into smu->smu_table.tables[SMU_TABLE_WATERMARKS]->cpu_addr*/ + for (i = 0; i < clock_ranges->num_wm_dmif_sets; i++) { + table->WatermarkRow[WM_DCFCLK][i].MinClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_dcfclk_clk_in_khz)); + table->WatermarkRow[WM_DCFCLK][i].MaxClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_dcfclk_clk_in_khz)); + table->WatermarkRow[WM_DCFCLK][i].MinMclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_min_mem_clk_in_khz)); + table->WatermarkRow[WM_DCFCLK][i].MaxMclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_dmif_clocks_ranges[i].wm_max_mem_clk_in_khz)); + table->WatermarkRow[WM_DCFCLK][i].WmSetting = (uint8_t) + clock_ranges->wm_dmif_clocks_ranges[i].wm_set_id; + } + + for (i = 0; i < clock_ranges->num_wm_mcif_sets; i++) { + table->WatermarkRow[WM_SOCCLK][i].MinClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_socclk_clk_in_khz)); + table->WatermarkRow[WM_SOCCLK][i].MaxClock = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_socclk_clk_in_khz)); + table->WatermarkRow[WM_SOCCLK][i].MinMclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_min_mem_clk_in_khz)); + table->WatermarkRow[WM_SOCCLK][i].MaxMclk = + cpu_to_le16((uint16_t) + (clock_ranges->wm_mcif_clocks_ranges[i].wm_max_mem_clk_in_khz)); + table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t) + clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id; + } + + /* pass data to smu controller */ + ret = smu_write_watermarks_table(smu); + + return ret; +} + +static int renoir_get_power_profile_mode(struct smu_context *smu, + char *buf) +{ + static const char *profile_name[] = { + "BOOTUP_DEFAULT", + "3D_FULL_SCREEN", + "POWER_SAVING", + "VIDEO", + "VR", + "COMPUTE", + "CUSTOM"}; + uint32_t i, size = 0; + int16_t workload_type = 0; + + if (!smu->pm_enabled || !buf) + return -EINVAL; + + for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) { + /* + * Conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT + * Not all profile modes are supported on arcturus. + */ + workload_type = smu_workload_get_type(smu, i); + if (workload_type < 0) + continue; + + size += sprintf(buf + size, "%2d %14s%s\n", + i, profile_name[i], (i == smu->power_profile_mode) ? 
"*" : " "); + } + + return size; +} + static const struct pptable_funcs renoir_ppt_funcs = { .get_smu_msg_index = renoir_get_smu_msg_index, .get_smu_table_index = renoir_get_smu_table_index, .tables_init = renoir_tables_init, .set_power_state = NULL, - .get_dpm_uclk_limited = renoir_get_dpm_uclk_limited, + .get_dpm_clk_limited = renoir_get_dpm_clk_limited, .print_clk_levels = renoir_print_clk_levels, + .get_current_power_state = renoir_get_current_power_state, + .dpm_set_uvd_enable = renoir_dpm_set_uvd_enable, + .force_dpm_limit_value = renoir_force_dpm_limit_value, + .unforce_dpm_levels = renoir_unforce_dpm_levels, + .get_workload_type = renoir_get_workload_type, + .get_profiling_clk_mask = renoir_get_profiling_clk_mask, + .force_clk_levels = renoir_force_clk_levels, + .set_power_profile_mode = renoir_set_power_profile_mode, + .set_performance_level = renoir_set_performance_level, + .get_dpm_clock_table = renoir_get_dpm_clock_table, + .set_watermarks_table = renoir_set_watermarks_table, + .get_power_profile_mode = renoir_get_power_profile_mode, + .check_fw_status = smu_v12_0_check_fw_status, + .check_fw_version = smu_v12_0_check_fw_version, + .powergate_sdma = smu_v12_0_powergate_sdma, + .powergate_vcn = smu_v12_0_powergate_vcn, + .send_smc_msg = smu_v12_0_send_msg, + .send_smc_msg_with_param = smu_v12_0_send_msg_with_param, + .read_smc_arg = smu_v12_0_read_arg, + .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, + .gfx_off_control = smu_v12_0_gfx_off_control, + .init_smc_tables = smu_v12_0_init_smc_tables, + .fini_smc_tables = smu_v12_0_fini_smc_tables, + .populate_smc_tables = smu_v12_0_populate_smc_tables, + .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq, + .mode2_reset = smu_v12_0_mode2_reset, + .set_soft_freq_limited_range = smu_v12_0_set_soft_freq_limited_range, }; void renoir_set_ppt_funcs(struct smu_context *smu) { - struct smu_table_context *smu_table = &smu->smu_table; - smu->ppt_funcs = &renoir_ppt_funcs; smu->smc_if_version = SMU12_DRIVER_IF_VERSION; - smu_table->table_count = TABLE_COUNT; + smu->is_apu = true; } diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h new file mode 100644 index 000000000000..8bcda7871309 --- /dev/null +++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h @@ -0,0 +1,204 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __SMU_INTERNAL_H__ +#define __SMU_INTERNAL_H__ + +#include "amdgpu_smu.h" + +#define smu_init_microcode(smu) \ + ((smu)->ppt_funcs->init_microcode ? (smu)->ppt_funcs->init_microcode((smu)) : 0) +#define smu_init_smc_tables(smu) \ + ((smu)->ppt_funcs->init_smc_tables ? (smu)->ppt_funcs->init_smc_tables((smu)) : 0) +#define smu_fini_smc_tables(smu) \ + ((smu)->ppt_funcs->fini_smc_tables ? (smu)->ppt_funcs->fini_smc_tables((smu)) : 0) +#define smu_init_power(smu) \ + ((smu)->ppt_funcs->init_power ? (smu)->ppt_funcs->init_power((smu)) : 0) +#define smu_fini_power(smu) \ + ((smu)->ppt_funcs->fini_power ? (smu)->ppt_funcs->fini_power((smu)) : 0) + +#define smu_setup_pptable(smu) \ + ((smu)->ppt_funcs->setup_pptable ? (smu)->ppt_funcs->setup_pptable((smu)) : 0) +#define smu_powergate_sdma(smu, gate) \ + ((smu)->ppt_funcs->powergate_sdma ? (smu)->ppt_funcs->powergate_sdma((smu), (gate)) : 0) +#define smu_powergate_vcn(smu, gate) \ + ((smu)->ppt_funcs->powergate_vcn ? (smu)->ppt_funcs->powergate_vcn((smu), (gate)) : 0) + +#define smu_get_vbios_bootup_values(smu) \ + ((smu)->ppt_funcs->get_vbios_bootup_values ? (smu)->ppt_funcs->get_vbios_bootup_values((smu)) : 0) +#define smu_get_clk_info_from_vbios(smu) \ + ((smu)->ppt_funcs->get_clk_info_from_vbios ? (smu)->ppt_funcs->get_clk_info_from_vbios((smu)) : 0) +#define smu_check_pptable(smu) \ + ((smu)->ppt_funcs->check_pptable ? (smu)->ppt_funcs->check_pptable((smu)) : 0) +#define smu_parse_pptable(smu) \ + ((smu)->ppt_funcs->parse_pptable ? (smu)->ppt_funcs->parse_pptable((smu)) : 0) +#define smu_populate_smc_tables(smu) \ + ((smu)->ppt_funcs->populate_smc_tables ? (smu)->ppt_funcs->populate_smc_tables((smu)) : 0) +#define smu_check_fw_version(smu) \ + ((smu)->ppt_funcs->check_fw_version ? (smu)->ppt_funcs->check_fw_version((smu)) : 0) +#define smu_write_pptable(smu) \ + ((smu)->ppt_funcs->write_pptable ? (smu)->ppt_funcs->write_pptable((smu)) : 0) +#define smu_set_min_dcef_deep_sleep(smu) \ + ((smu)->ppt_funcs->set_min_dcef_deep_sleep ? (smu)->ppt_funcs->set_min_dcef_deep_sleep((smu)) : 0) +#define smu_set_tool_table_location(smu) \ + ((smu)->ppt_funcs->set_tool_table_location ? (smu)->ppt_funcs->set_tool_table_location((smu)) : 0) +#define smu_notify_memory_pool_location(smu) \ + ((smu)->ppt_funcs->notify_memory_pool_location ? (smu)->ppt_funcs->notify_memory_pool_location((smu)) : 0) +#define smu_gfx_off_control(smu, enable) \ + ((smu)->ppt_funcs->gfx_off_control ? (smu)->ppt_funcs->gfx_off_control((smu), (enable)) : 0) + +#define smu_set_last_dcef_min_deep_sleep_clk(smu) \ + ((smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk ? (smu)->ppt_funcs->set_last_dcef_min_deep_sleep_clk((smu)) : 0) +#define smu_system_features_control(smu, en) \ + ((smu)->ppt_funcs->system_features_control ? (smu)->ppt_funcs->system_features_control((smu), (en)) : 0) +#define smu_init_max_sustainable_clocks(smu) \ + ((smu)->ppt_funcs->init_max_sustainable_clocks ? (smu)->ppt_funcs->init_max_sustainable_clocks((smu)) : 0) +#define smu_set_default_od_settings(smu, initialize) \ + ((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0) + +#define smu_send_smc_msg(smu, msg) \ + ((smu)->ppt_funcs->send_smc_msg? (smu)->ppt_funcs->send_smc_msg((smu), (msg)) : 0) +#define smu_send_smc_msg_with_param(smu, msg, param) \ + ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0) +#define smu_read_smc_arg(smu, arg) \ + ((smu)->ppt_funcs->read_smc_arg? 
(smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0) +#define smu_alloc_dpm_context(smu) \ + ((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0) +#define smu_init_display_count(smu, count) \ + ((smu)->ppt_funcs->init_display_count ? (smu)->ppt_funcs->init_display_count((smu), (count)) : 0) +#define smu_feature_set_allowed_mask(smu) \ + ((smu)->ppt_funcs->set_allowed_mask? (smu)->ppt_funcs->set_allowed_mask((smu)) : 0) +#define smu_feature_get_enabled_mask(smu, mask, num) \ + ((smu)->ppt_funcs->get_enabled_mask? (smu)->ppt_funcs->get_enabled_mask((smu), (mask), (num)) : 0) +#define smu_is_dpm_running(smu) \ + ((smu)->ppt_funcs->is_dpm_running ? (smu)->ppt_funcs->is_dpm_running((smu)) : 0) +#define smu_notify_display_change(smu) \ + ((smu)->ppt_funcs->notify_display_change? (smu)->ppt_funcs->notify_display_change((smu)) : 0) +#define smu_store_powerplay_table(smu) \ + ((smu)->ppt_funcs->store_powerplay_table ? (smu)->ppt_funcs->store_powerplay_table((smu)) : 0) +#define smu_check_powerplay_table(smu) \ + ((smu)->ppt_funcs->check_powerplay_table ? (smu)->ppt_funcs->check_powerplay_table((smu)) : 0) +#define smu_append_powerplay_table(smu) \ + ((smu)->ppt_funcs->append_powerplay_table ? (smu)->ppt_funcs->append_powerplay_table((smu)) : 0) +#define smu_set_default_dpm_table(smu) \ + ((smu)->ppt_funcs->set_default_dpm_table ? (smu)->ppt_funcs->set_default_dpm_table((smu)) : 0) +#define smu_populate_umd_state_clk(smu) \ + ((smu)->ppt_funcs->populate_umd_state_clk ? (smu)->ppt_funcs->populate_umd_state_clk((smu)) : 0) +#define smu_set_default_od8_settings(smu) \ + ((smu)->ppt_funcs->set_default_od8_settings ? (smu)->ppt_funcs->set_default_od8_settings((smu)) : 0) + +#define smu_get_current_clk_freq(smu, clk_id, value) \ + ((smu)->ppt_funcs->get_current_clk_freq? (smu)->ppt_funcs->get_current_clk_freq((smu), (clk_id), (value)) : 0) + +#define smu_tables_init(smu, tab) \ + ((smu)->ppt_funcs->tables_init ? (smu)->ppt_funcs->tables_init((smu), (tab)) : 0) +#define smu_set_thermal_fan_table(smu) \ + ((smu)->ppt_funcs->set_thermal_fan_table ? (smu)->ppt_funcs->set_thermal_fan_table((smu)) : 0) +#define smu_start_thermal_control(smu) \ + ((smu)->ppt_funcs->start_thermal_control? (smu)->ppt_funcs->start_thermal_control((smu)) : 0) +#define smu_stop_thermal_control(smu) \ + ((smu)->ppt_funcs->stop_thermal_control? (smu)->ppt_funcs->stop_thermal_control((smu)) : 0) + +#define smu_smc_read_sensor(smu, sensor, data, size) \ + ((smu)->ppt_funcs->read_sensor? (smu)->ppt_funcs->read_sensor((smu), (sensor), (data), (size)) : -EINVAL) + +#define smu_pre_display_config_changed(smu) \ + ((smu)->ppt_funcs->pre_display_config_changed ? (smu)->ppt_funcs->pre_display_config_changed((smu)) : 0) +#define smu_display_config_changed(smu) \ + ((smu)->ppt_funcs->display_config_changed ? (smu)->ppt_funcs->display_config_changed((smu)) : 0) +#define smu_apply_clocks_adjust_rules(smu) \ + ((smu)->ppt_funcs->apply_clocks_adjust_rules ? (smu)->ppt_funcs->apply_clocks_adjust_rules((smu)) : 0) +#define smu_notify_smc_dispaly_config(smu) \ + ((smu)->ppt_funcs->notify_smc_dispaly_config ? (smu)->ppt_funcs->notify_smc_dispaly_config((smu)) : 0) +#define smu_force_dpm_limit_value(smu, highest) \ + ((smu)->ppt_funcs->force_dpm_limit_value ? (smu)->ppt_funcs->force_dpm_limit_value((smu), (highest)) : 0) +#define smu_unforce_dpm_levels(smu) \ + ((smu)->ppt_funcs->unforce_dpm_levels ? 
(smu)->ppt_funcs->unforce_dpm_levels((smu)) : 0) +#define smu_get_profiling_clk_mask(smu, level, sclk_mask, mclk_mask, soc_mask) \ + ((smu)->ppt_funcs->get_profiling_clk_mask ? (smu)->ppt_funcs->get_profiling_clk_mask((smu), (level), (sclk_mask), (mclk_mask), (soc_mask)) : 0) +#define smu_set_cpu_power_state(smu) \ + ((smu)->ppt_funcs->set_cpu_power_state ? (smu)->ppt_funcs->set_cpu_power_state((smu)) : 0) + +#define smu_msg_get_index(smu, msg) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_msg_index? (smu)->ppt_funcs->get_smu_msg_index((smu), (msg)) : -EINVAL) : -EINVAL) +#define smu_clk_get_index(smu, msg) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_clk_index? (smu)->ppt_funcs->get_smu_clk_index((smu), (msg)) : -EINVAL) : -EINVAL) +#define smu_feature_get_index(smu, msg) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_feature_index? (smu)->ppt_funcs->get_smu_feature_index((smu), (msg)) : -EINVAL) : -EINVAL) +#define smu_table_get_index(smu, tab) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_table_index? (smu)->ppt_funcs->get_smu_table_index((smu), (tab)) : -EINVAL) : -EINVAL) +#define smu_power_get_index(smu, src) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_smu_power_index? (smu)->ppt_funcs->get_smu_power_index((smu), (src)) : -EINVAL) : -EINVAL) +#define smu_workload_get_type(smu, profile) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_workload_type? (smu)->ppt_funcs->get_workload_type((smu), (profile)) : -EINVAL) : -EINVAL) +#define smu_run_btc(smu) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->run_btc? (smu)->ppt_funcs->run_btc((smu)) : 0) : 0) +#define smu_get_allowed_feature_mask(smu, feature_mask, num) \ + ((smu)->ppt_funcs? ((smu)->ppt_funcs->get_allowed_feature_mask? (smu)->ppt_funcs->get_allowed_feature_mask((smu), (feature_mask), (num)) : 0) : 0) + + +#define smu_store_cc6_data(smu, st, cc6_dis, pst_dis, pst_sw_dis) \ + ((smu)->ppt_funcs->store_cc6_data ? (smu)->ppt_funcs->store_cc6_data((smu), (st), (cc6_dis), (pst_dis), (pst_sw_dis)) : 0) + +#define smu_get_dal_power_level(smu, clocks) \ + ((smu)->ppt_funcs->get_dal_power_level ? (smu)->ppt_funcs->get_dal_power_level((smu), (clocks)) : 0) +#define smu_get_perf_level(smu, designation, level) \ + ((smu)->ppt_funcs->get_perf_level ? (smu)->ppt_funcs->get_perf_level((smu), (designation), (level)) : 0) +#define smu_get_current_shallow_sleep_clocks(smu, clocks) \ + ((smu)->ppt_funcs->get_current_shallow_sleep_clocks ? (smu)->ppt_funcs->get_current_shallow_sleep_clocks((smu), (clocks)) : 0) + +#define smu_dpm_set_uvd_enable(smu, enable) \ + ((smu)->ppt_funcs->dpm_set_uvd_enable ? (smu)->ppt_funcs->dpm_set_uvd_enable((smu), (enable)) : 0) +#define smu_dpm_set_vce_enable(smu, enable) \ + ((smu)->ppt_funcs->dpm_set_vce_enable ? (smu)->ppt_funcs->dpm_set_vce_enable((smu), (enable)) : 0) + +#define smu_set_watermarks_table(smu, tab, clock_ranges) \ + ((smu)->ppt_funcs->set_watermarks_table ? (smu)->ppt_funcs->set_watermarks_table((smu), (tab), (clock_ranges)) : 0) +#define smu_get_current_clk_freq_by_table(smu, clk_type, value) \ + ((smu)->ppt_funcs->get_current_clk_freq_by_table ? (smu)->ppt_funcs->get_current_clk_freq_by_table((smu), (clk_type), (value)) : 0) +#define smu_thermal_temperature_range_update(smu, range, rw) \ + ((smu)->ppt_funcs->thermal_temperature_range_update? (smu)->ppt_funcs->thermal_temperature_range_update((smu), (range), (rw)) : 0) +#define smu_get_thermal_temperature_range(smu, range) \ + ((smu)->ppt_funcs->get_thermal_temperature_range? 
(smu)->ppt_funcs->get_thermal_temperature_range((smu), (range)) : 0) +#define smu_register_irq_handler(smu) \ + ((smu)->ppt_funcs->register_irq_handler ? (smu)->ppt_funcs->register_irq_handler(smu) : 0) + +#define smu_get_dpm_ultimate_freq(smu, param, min, max) \ + ((smu)->ppt_funcs->get_dpm_ultimate_freq ? (smu)->ppt_funcs->get_dpm_ultimate_freq((smu), (param), (min), (max)) : 0) + +#define smu_asic_set_performance_level(smu, level) \ + ((smu)->ppt_funcs->set_performance_level? (smu)->ppt_funcs->set_performance_level((smu), (level)) : -EINVAL); +#define smu_dump_pptable(smu) \ + ((smu)->ppt_funcs->dump_pptable ? (smu)->ppt_funcs->dump_pptable((smu)) : 0) +#define smu_get_dpm_clk_limited(smu, clk_type, dpm_level, freq) \ + ((smu)->ppt_funcs->get_dpm_clk_limited ? (smu)->ppt_funcs->get_dpm_clk_limited((smu), (clk_type), (dpm_level), (freq)) : -EINVAL) + +#define smu_set_soft_freq_limited_range(smu, clk_type, min, max) \ + ((smu)->ppt_funcs->set_soft_freq_limited_range ? (smu)->ppt_funcs->set_soft_freq_limited_range((smu), (clk_type), (min), (max)) : -EINVAL) + +#define smu_override_pcie_parameters(smu) \ + ((smu)->ppt_funcs->override_pcie_parameters ? (smu)->ppt_funcs->override_pcie_parameters((smu)) : 0) + +#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) \ + ((smu)->ppt_funcs->update_pcie_parameters ? (smu)->ppt_funcs->update_pcie_parameters((smu), (pcie_gen_cap), (pcie_width_cap)) : 0) + +#endif diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c index c5257ae3188a..fc9679ea2368 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c @@ -24,17 +24,19 @@ #include <linux/module.h> #include <linux/pci.h> +#define SMU_11_0_PARTIAL_PPTABLE + #include "pp_debug.h" #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" +#include "smu_v11_0_pptable.h" #include "soc15_common.h" #include "atom.h" -#include "vega20_ppt.h" -#include "arcturus_ppt.h" -#include "navi10_ppt.h" +#include "amd_pcie.h" #include "asic_reg/thm/thm_11_0_2_offset.h" #include "asic_reg/thm/thm_11_0_2_sh_mask.h" @@ -61,7 +63,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu, return 0; } -static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg) +int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg) { struct amdgpu_device *adev = smu->adev; @@ -88,7 +90,7 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu) return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 
0 : -EIO; } -static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg) +int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg) { struct amdgpu_device *adev = smu->adev; int ret = 0, index = 0; @@ -113,7 +115,7 @@ static int smu_v11_0_send_msg(struct smu_context *smu, uint16_t msg) } -static int +int smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, uint32_t param) { @@ -144,7 +146,7 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, return ret; } -static int smu_v11_0_init_microcode(struct smu_context *smu) +int smu_v11_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; const char *chip_name; @@ -206,7 +208,7 @@ out: return err; } -static int smu_v11_0_load_microcode(struct smu_context *smu) +int smu_v11_0_load_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; const uint32_t *src; @@ -244,7 +246,7 @@ static int smu_v11_0_load_microcode(struct smu_context *smu) return 0; } -static int smu_v11_0_check_fw_status(struct smu_context *smu) +int smu_v11_0_check_fw_status(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t mp1_fw_flags; @@ -259,7 +261,7 @@ static int smu_v11_0_check_fw_status(struct smu_context *smu) return -EIO; } -static int smu_v11_0_check_fw_version(struct smu_context *smu) +int smu_v11_0_check_fw_version(struct smu_context *smu) { uint32_t if_version = 0xff, smu_version = 0xff; uint16_t smu_major; @@ -354,7 +356,7 @@ static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table, return 0; } -static int smu_v11_0_setup_pptable(struct smu_context *smu) +int smu_v11_0_setup_pptable(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; const struct smc_firmware_header_v1_0 *hdr; @@ -369,6 +371,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu) version_major = le16_to_cpu(hdr->header.header_version_major); version_minor = le16_to_cpu(hdr->header.header_version_minor); if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) { + pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id); switch (version_minor) { case 0: ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size); @@ -385,6 +388,7 @@ static int smu_v11_0_setup_pptable(struct smu_context *smu) return ret; } else { + pr_info("use vbios provided pptable\n"); index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, powerplayinfo); @@ -433,13 +437,13 @@ static int smu_v11_0_fini_dpm_context(struct smu_context *smu) return 0; } -static int smu_v11_0_init_smc_tables(struct smu_context *smu) +int smu_v11_0_init_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = NULL; int ret = 0; - if (smu_table->tables || smu_table->table_count == 0) + if (smu_table->tables) return -EINVAL; tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table), @@ -460,18 +464,17 @@ static int smu_v11_0_init_smc_tables(struct smu_context *smu) return 0; } -static int smu_v11_0_fini_smc_tables(struct smu_context *smu) +int smu_v11_0_fini_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; int ret = 0; - if (!smu_table->tables || smu_table->table_count == 0) + if (!smu_table->tables) return -EINVAL; kfree(smu_table->tables); kfree(smu_table->metrics_table); smu_table->tables = NULL; - smu_table->table_count = 0; smu_table->metrics_table = NULL; smu_table->metrics_time = 0; @@ -481,7 +484,7 @@ static int 
smu_v11_0_fini_smc_tables(struct smu_context *smu) return 0; } -static int smu_v11_0_init_power(struct smu_context *smu) +int smu_v11_0_init_power(struct smu_context *smu) { struct smu_power_context *smu_power = &smu->smu_power; @@ -499,7 +502,7 @@ static int smu_v11_0_init_power(struct smu_context *smu) return 0; } -static int smu_v11_0_fini_power(struct smu_context *smu) +int smu_v11_0_fini_power(struct smu_context *smu) { struct smu_power_context *smu_power = &smu->smu_power; @@ -576,7 +579,7 @@ int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu) return 0; } -static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu) +int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu) { int ret, index; struct amdgpu_device *adev = smu->adev; @@ -673,7 +676,7 @@ static int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu) return 0; } -static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) +int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *memory_pool = &smu_table->memory_pool; @@ -719,7 +722,7 @@ static int smu_v11_0_notify_memory_pool_location(struct smu_context *smu) return ret; } -static int smu_v11_0_check_pptable(struct smu_context *smu) +int smu_v11_0_check_pptable(struct smu_context *smu) { int ret; @@ -727,7 +730,7 @@ static int smu_v11_0_check_pptable(struct smu_context *smu) return ret; } -static int smu_v11_0_parse_pptable(struct smu_context *smu) +int smu_v11_0_parse_pptable(struct smu_context *smu) { int ret; @@ -751,7 +754,7 @@ static int smu_v11_0_parse_pptable(struct smu_context *smu) return ret; } -static int smu_v11_0_populate_smc_pptable(struct smu_context *smu) +int smu_v11_0_populate_smc_pptable(struct smu_context *smu) { int ret; @@ -760,7 +763,7 @@ static int smu_v11_0_populate_smc_pptable(struct smu_context *smu) return ret; } -static int smu_v11_0_write_pptable(struct smu_context *smu) +int smu_v11_0_write_pptable(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; int ret = 0; @@ -771,24 +774,7 @@ static int smu_v11_0_write_pptable(struct smu_context *smu) return ret; } -static int smu_v11_0_write_watermarks_table(struct smu_context *smu) -{ - int ret = 0; - struct smu_table_context *smu_table = &smu->smu_table; - struct smu_table *table = NULL; - - table = &smu_table->tables[SMU_TABLE_WATERMARKS]; - - if (!table->cpu_addr) - return -EINVAL; - - ret = smu_update_table(smu, SMU_TABLE_WATERMARKS, 0, table->cpu_addr, - true); - - return ret; -} - -static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) +int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk) { int ret; @@ -800,7 +786,7 @@ static int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t cl return ret; } -static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) +int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -809,11 +795,10 @@ static int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu) if (!table_context) return -EINVAL; - return smu_set_deep_sleep_dcefclk(smu, - table_context->boot_values.dcefclk / 100); + return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100); } -static int smu_v11_0_set_tool_table_location(struct smu_context *smu) +int smu_v11_0_set_tool_table_location(struct smu_context *smu) { int ret = 0; struct smu_table *tool_table = 
&smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG]; @@ -831,7 +816,7 @@ static int smu_v11_0_set_tool_table_location(struct smu_context *smu) return ret; } -static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) +int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) { int ret = 0; @@ -843,7 +828,7 @@ static int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count) } -static int smu_v11_0_set_allowed_mask(struct smu_context *smu) +int smu_v11_0_set_allowed_mask(struct smu_context *smu) { struct smu_feature *feature = &smu->smu_feature; int ret = 0; @@ -870,7 +855,7 @@ failed: return ret; } -static int smu_v11_0_get_enabled_mask(struct smu_context *smu, +int smu_v11_0_get_enabled_mask(struct smu_context *smu, uint32_t *feature_mask, uint32_t num) { uint32_t feature_mask_high = 0, feature_mask_low = 0; @@ -899,7 +884,7 @@ static int smu_v11_0_get_enabled_mask(struct smu_context *smu, return ret; } -static int smu_v11_0_system_features_control(struct smu_context *smu, +int smu_v11_0_system_features_control(struct smu_context *smu, bool en) { struct smu_feature *feature = &smu->smu_feature; @@ -925,7 +910,7 @@ static int smu_v11_0_system_features_control(struct smu_context *smu, return ret; } -static int smu_v11_0_notify_display_change(struct smu_context *smu) +int smu_v11_0_notify_display_change(struct smu_context *smu) { int ret = 0; @@ -983,7 +968,7 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock, return ret; } -static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) +int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) { struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks; int ret = 0; @@ -1063,13 +1048,44 @@ static int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu) return 0; } -static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) +uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu) { + uint32_t od_limit, max_power_limit; + struct smu_11_0_powerplay_table *powerplay_table = NULL; + struct smu_table_context *table_context = &smu->smu_table; + powerplay_table = table_context->power_play_table; + + max_power_limit = smu_get_pptable_power_limit(smu); + + if (!max_power_limit) { + // If we couldn't get the table limit, fall back on first-read value + if (!smu->default_power_limit) + smu->default_power_limit = smu->power_limit; + max_power_limit = smu->default_power_limit; + } + + if (smu->od_enabled) { + od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]); + + pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit); + + max_power_limit *= (100 + od_limit); + max_power_limit /= 100; + } + + return max_power_limit; +} + +int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) { int ret = 0; + uint32_t max_power_limit; - if (n > smu->default_power_limit) { - pr_err("New power limit is over the max allowed %d\n", - smu->default_power_limit); + max_power_limit = smu_v11_0_get_max_power_limit(smu); + + if (n > max_power_limit) { + pr_err("New power limit (%d) is over the max allowed %d\n", + n, + max_power_limit); return -EINVAL; } @@ -1091,7 +1107,7 @@ static int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n) return 0; } -static int smu_v11_0_get_current_clk_freq(struct smu_context *smu, +int smu_v11_0_get_current_clk_freq(struct smu_context *smu, enum smu_clk_type clk_id, uint32_t *value) { @@ -1170,7 +1186,7 @@ 
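/*
 * Illustrative aside, not part of the patch: smu_v11_0_get_max_power_limit()
 * above grows the board power limit by the overdrive percentage taken from
 * the pptable. With a 250 W base limit and an ODSETTING_POWERPERCENTAGE of 20
 * the ceiling becomes 250 * (100 + 20) / 100 = 300 W; the multiply is done
 * before the divide so integer truncation does not eat the headroom.
 * A hypothetical helper mirroring that arithmetic:
 */
static unsigned int apply_od_headroom(unsigned int limit_w, unsigned int od_pct)
{
        return limit_w * (100 + od_pct) / 100;  /* e.g. 250 W, 20 % -> 300 W */
}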
static int smu_v11_0_enable_thermal_alert(struct smu_context *smu) return 0; } -static int smu_v11_0_start_thermal_control(struct smu_context *smu) +int smu_v11_0_start_thermal_control(struct smu_context *smu) { int ret = 0; struct smu_temperature_range range; @@ -1212,6 +1228,15 @@ static int smu_v11_0_start_thermal_control(struct smu_context *smu) return ret; } +int smu_v11_0_stop_thermal_control(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0); + + return 0; +} + static uint16_t convert_to_vddc(uint8_t vid) { return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE); @@ -1236,7 +1261,7 @@ static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value) } -static int smu_v11_0_read_sensor(struct smu_context *smu, +int smu_v11_0_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor, void *data, uint32_t *size) { @@ -1273,7 +1298,7 @@ static int smu_v11_0_read_sensor(struct smu_context *smu, return ret; } -static int +int smu_v11_0_display_clock_voltage_request(struct smu_context *smu, struct pp_display_clock_request *clock_req) @@ -1316,9 +1341,7 @@ smu_v11_0_display_clock_voltage_request(struct smu_context *smu, if (clk_select == SMU_UCLK && smu->disable_uclk_switch) return 0; - mutex_lock(&smu->mutex); ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0); - mutex_unlock(&smu->mutex); if(clk_select == SMU_UCLK) smu->hard_min_uclk_req_from_dal = clk_freq; @@ -1328,27 +1351,7 @@ failed: return ret; } -static int -smu_v11_0_set_watermarks_for_clock_ranges(struct smu_context *smu, struct - dm_pp_wm_sets_with_clock_ranges_soc15 - *clock_ranges) -{ - int ret = 0; - struct smu_table *watermarks = &smu->smu_table.tables[SMU_TABLE_WATERMARKS]; - void *table = watermarks->cpu_addr; - - if (!smu->disable_watermark && - smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) && - smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) { - smu_set_watermarks_table(smu, table, clock_ranges); - smu->watermarks_bitmap |= WATERMARKS_EXIST; - smu->watermarks_bitmap &= ~WATERMARKS_LOADED; - } - - return ret; -} - -static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) +int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) { int ret = 0; struct amdgpu_device *adev = smu->adev; @@ -1361,12 +1364,10 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) case CHIP_NAVI12: if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; - mutex_lock(&smu->mutex); if (enable) ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); else ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); - mutex_unlock(&smu->mutex); break; default: break; @@ -1375,7 +1376,7 @@ static int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable) return ret; } -static uint32_t +uint32_t smu_v11_0_get_fan_control_mode(struct smu_context *smu) { if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT)) @@ -1415,7 +1416,7 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode) return 0; } -static int +int smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) { struct amdgpu_device *adev = smu->adev; @@ -1444,7 +1445,7 @@ smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC); } -static int +int smu_v11_0_set_fan_control_mode(struct smu_context *smu, uint32_t mode) { @@ -1472,7 +1473,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu, return ret; } -static 
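/*
 * Illustrative aside, not part of the patch: convert_to_vddc() above decodes
 * an SVI2-style VID code into millivolts, assuming SMU11_VOLTAGE_SCALE is 4:
 *
 *   vddc_mV = (6200 - vid * 25) / 4
 *
 * so vid = 0 reads as 1550 mV and vid = 64 as (6200 - 1600) / 4 = 1150 mV,
 * i.e. each VID step lowers the reported voltage by 6.25 mV.
 */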
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, +int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed) { struct amdgpu_device *adev = smu->adev; @@ -1482,10 +1483,9 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, if (!speed) return -EINVAL; - mutex_lock(&(smu->mutex)); ret = smu_v11_0_auto_fan_control(smu, 0); if (ret) - goto set_fan_speed_rpm_failed; + return ret; crystal_clock_freq = amdgpu_asic_get_xclk(adev); tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed); @@ -1496,23 +1496,16 @@ static int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM); -set_fan_speed_rpm_failed: - mutex_unlock(&(smu->mutex)); return ret; } -#define XGMI_STATE_D0 1 -#define XGMI_STATE_D3 0 - -static int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, +int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, uint32_t pstate) { int ret = 0; - mutex_lock(&(smu->mutex)); ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetXgmiMode, - pstate ? XGMI_STATE_D0 : XGMI_STATE_D3); - mutex_unlock(&(smu->mutex)); + pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3); return ret; } @@ -1559,7 +1552,7 @@ static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs = .process = smu_v11_0_irq_process, }; -static int smu_v11_0_register_irq_handler(struct smu_context *smu) +int smu_v11_0_register_irq_handler(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; struct amdgpu_irq_src *irq_src = smu->irq_source; @@ -1591,7 +1584,7 @@ static int smu_v11_0_register_irq_handler(struct smu_context *smu) return ret; } -static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, +int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks) { struct smu_table_context *table_context = &smu->smu_table; @@ -1621,13 +1614,11 @@ static int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, return 0; } -static int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) +int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) { int ret = 0; - mutex_lock(&smu->mutex); ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME); - mutex_unlock(&smu->mutex); return ret; } @@ -1637,7 +1628,7 @@ static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq); } -static bool smu_v11_0_baco_is_support(struct smu_context *smu) +bool smu_v11_0_baco_is_support(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; struct smu_baco_context *smu_baco = &smu->smu_baco; @@ -1661,7 +1652,7 @@ static bool smu_v11_0_baco_is_support(struct smu_context *smu) return false; } -static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) +enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; enum smu_baco_state baco_state; @@ -1673,7 +1664,7 @@ static enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) return baco_state; } -static int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) +int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) { struct smu_baco_context *smu_baco = &smu->smu_baco; @@ -1697,7 +1688,7 @@ out: return ret; } -static int smu_v11_0_baco_reset(struct smu_context *smu) +int smu_v11_0_baco_reset(struct smu_context *smu) { int ret = 0; @@ -1718,13 +1709,12 @@ static int 
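/*
 * Illustrative aside, not part of the patch: the tach period programmed by
 * smu_v11_0_set_fan_speed_rpm() above converts a target RPM into
 * reference-clock cycles per tach pulse, assuming the xclk value is reported
 * in 10 kHz units (e.g. 2500 for a 25 MHz crystal) and 8 tach edges per
 * revolution:
 *
 *   tach_period = 60 * xclk * 10000 / (8 * rpm)
 *               = 60 * 2500 * 10000 / (8 * 1500) = 125000 cycles
 *
 * Higher RPM therefore means a shorter period, which is what the
 * static-RPM fan mode writes into the fan controller.
 */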
smu_v11_0_baco_reset(struct smu_context *smu) return ret; } -static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, +int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { int ret = 0, clk_id = 0; uint32_t param = 0; - mutex_lock(&smu->mutex); clk_id = smu_clk_get_index(smu, clk_type); if (clk_id < 0) { ret = -EINVAL; @@ -1751,80 +1741,102 @@ static int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk } failed: - mutex_unlock(&smu->mutex); return ret; } -static const struct smu_funcs smu_v11_0_funcs = { - .init_microcode = smu_v11_0_init_microcode, - .load_microcode = smu_v11_0_load_microcode, - .check_fw_status = smu_v11_0_check_fw_status, - .check_fw_version = smu_v11_0_check_fw_version, - .send_smc_msg = smu_v11_0_send_msg, - .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, - .read_smc_arg = smu_v11_0_read_arg, - .setup_pptable = smu_v11_0_setup_pptable, - .init_smc_tables = smu_v11_0_init_smc_tables, - .fini_smc_tables = smu_v11_0_fini_smc_tables, - .init_power = smu_v11_0_init_power, - .fini_power = smu_v11_0_fini_power, - .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, - .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios, - .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, - .check_pptable = smu_v11_0_check_pptable, - .parse_pptable = smu_v11_0_parse_pptable, - .populate_smc_tables = smu_v11_0_populate_smc_pptable, - .write_pptable = smu_v11_0_write_pptable, - .write_watermarks_table = smu_v11_0_write_watermarks_table, - .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, - .set_tool_table_location = smu_v11_0_set_tool_table_location, - .init_display_count = smu_v11_0_init_display_count, - .set_allowed_mask = smu_v11_0_set_allowed_mask, - .get_enabled_mask = smu_v11_0_get_enabled_mask, - .system_features_control = smu_v11_0_system_features_control, - .notify_display_change = smu_v11_0_notify_display_change, - .set_power_limit = smu_v11_0_set_power_limit, - .get_current_clk_freq = smu_v11_0_get_current_clk_freq, - .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks, - .start_thermal_control = smu_v11_0_start_thermal_control, - .read_sensor = smu_v11_0_read_sensor, - .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, - .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, - .set_watermarks_for_clock_ranges = smu_v11_0_set_watermarks_for_clock_ranges, - .get_fan_control_mode = smu_v11_0_get_fan_control_mode, - .set_fan_control_mode = smu_v11_0_set_fan_control_mode, - .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, - .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, - .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, - .gfx_off_control = smu_v11_0_gfx_off_control, - .register_irq_handler = smu_v11_0_register_irq_handler, - .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, - .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, - .baco_is_support = smu_v11_0_baco_is_support, - .baco_get_state = smu_v11_0_baco_get_state, - .baco_set_state = smu_v11_0_baco_set_state, - .baco_reset = smu_v11_0_baco_reset, - .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, -}; +int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max) +{ + int ret = 0, clk_id = 0; + uint32_t param; -void smu_v11_0_set_smu_funcs(struct smu_context *smu) + clk_id = smu_clk_get_index(smu, 
clk_type); + if (clk_id < 0) + return clk_id; + + if (max > 0) { + param = (uint32_t)((clk_id << 16) | (max & 0xffff)); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, + param); + if (ret) + return ret; + } + + if (min > 0) { + param = (uint32_t)((clk_id << 16) | (min & 0xffff)); + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq, + param); + if (ret) + return ret; + } + + return ret; +} + +int smu_v11_0_override_pcie_parameters(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; + uint32_t pcie_gen = 0, pcie_width = 0; + int ret; - smu->funcs = &smu_v11_0_funcs; - switch (adev->asic_type) { - case CHIP_VEGA20: - vega20_set_ppt_funcs(smu); - break; - case CHIP_ARCTURUS: - arcturus_set_ppt_funcs(smu); - break; - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - navi10_set_ppt_funcs(smu); - break; - default: - pr_warn("Unknown asic for smu11\n"); + if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4) + pcie_gen = 3; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) + pcie_gen = 2; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) + pcie_gen = 1; + else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1) + pcie_gen = 0; + + /* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1 + * Bit 15:8: PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4 + * Bit 7:0: PCIE lane width, 1 to 7 corresponds is x1 to x32 + */ + if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + pcie_width = 6; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12) + pcie_width = 5; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8) + pcie_width = 4; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4) + pcie_width = 3; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2) + pcie_width = 2; + else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1) + pcie_width = 1; + + ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width); + + if (ret) + pr_err("[%s] Attempt to override pcie params failed!\n", __func__); + + return ret; + +} + +int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size) +{ + struct smu_table_context *table_context = &smu->smu_table; + int ret = 0; + + if (initialize) { + if (table_context->overdrive_table) { + return -EINVAL; + } + table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL); + if (!table_context->overdrive_table) { + return -ENOMEM; + } + ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false); + if (ret) { + pr_err("Failed to export overdrive table!\n"); + return ret; + } } + ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true); + if (ret) { + pr_err("Failed to import overdrive table!\n"); + return ret; + } + return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c index 9d2280ca1f4b..139dd737eaa5 100644 --- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c +++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c @@ -24,12 +24,12 @@ #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v12_0.h" #include "soc15_common.h" #include "atom.h" -#include "renoir_ppt.h" #include "asic_reg/mp/mp_12_0_0_offset.h" #include "asic_reg/mp/mp_12_0_0_sh_mask.h" @@ -41,7 +41,7 @@ #define 
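/*
 * Illustrative aside, not part of the patch: the layout documented in the
 * comment inside smu_v11_0_override_pcie_parameters() above packs one PCIe
 * link level into a single 32-bit message argument:
 *   [31:16] LCLK DPM level, [15:8] PCIe gen code (0..3 = GEN1..GEN4),
 *   [7:0]   lane-width code (e.g. 6 = x16).
 * A hypothetical helper showing that packing:
 */
static unsigned int pack_pcie_arg(unsigned int dpm_level, unsigned int gen,
                                  unsigned int width)
{
        return (dpm_level << 16) | (gen << 8) | width;  /* 1, 3, 6 -> 0x00010306 */
}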
SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L #define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1 -static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, +int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, uint16_t msg) { struct amdgpu_device *adev = smu->adev; @@ -50,7 +50,7 @@ static int smu_v12_0_send_msg_without_waiting(struct smu_context *smu, return 0; } -static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) +int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) { struct amdgpu_device *adev = smu->adev; @@ -58,7 +58,7 @@ static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg) return 0; } -static int smu_v12_0_wait_for_response(struct smu_context *smu) +int smu_v12_0_wait_for_response(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t cur_value, i; @@ -77,7 +77,7 @@ static int smu_v12_0_wait_for_response(struct smu_context *smu) return RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90) == 0x1 ? 0 : -EIO; } -static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg) +int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg) { struct amdgpu_device *adev = smu->adev; int ret = 0, index = 0; @@ -102,7 +102,7 @@ static int smu_v12_0_send_msg(struct smu_context *smu, uint16_t msg) } -static int +int smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, uint32_t param) { @@ -132,7 +132,7 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu, uint16_t msg, return ret; } -static int smu_v12_0_check_fw_status(struct smu_context *smu) +int smu_v12_0_check_fw_status(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t mp1_fw_flags; @@ -147,7 +147,7 @@ static int smu_v12_0_check_fw_status(struct smu_context *smu) return -EIO; } -static int smu_v12_0_check_fw_version(struct smu_context *smu) +int smu_v12_0_check_fw_version(struct smu_context *smu) { uint32_t if_version = 0xff, smu_version = 0xff; uint16_t smu_major; @@ -181,7 +181,7 @@ static int smu_v12_0_check_fw_version(struct smu_context *smu) return ret; } -static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) +int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) { if (!(smu->adev->flags & AMD_IS_APU)) return 0; @@ -192,7 +192,7 @@ static int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate) return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma); } -static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) +int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) { if (!(smu->adev->flags & AMD_IS_APU)) return 0; @@ -203,7 +203,7 @@ static int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate) return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn); } -static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) +int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) { if (!(smu->adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) return 0; @@ -224,7 +224,7 @@ static int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable) * Returns 2=Not in GFXOFF. * Returns 3=Transition into GFXOFF. 
*/ -static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu) +uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu) { uint32_t reg; uint32_t gfxOff_Status = 0; @@ -237,22 +237,13 @@ static uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu) return gfxOff_Status; } -static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) +int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) { int ret = 0, timeout = 500; if (enable) { ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff); - /* confirm gfx is back to "off" state, timeout is 5 seconds */ - while (!(smu_v12_0_get_gfxoff_status(smu) == 0)) { - msleep(10); - timeout--; - if (timeout == 0) { - DRM_ERROR("enable gfxoff timeout and failed!\n"); - break; - } - } } else { ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff); @@ -270,12 +261,12 @@ static int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable) return ret; } -static int smu_v12_0_init_smc_tables(struct smu_context *smu) +int smu_v12_0_init_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *tables = NULL; - if (smu_table->tables || smu_table->table_count == 0) + if (smu_table->tables) return -EINVAL; tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table), @@ -288,11 +279,11 @@ static int smu_v12_0_init_smc_tables(struct smu_context *smu) return smu_tables_init(smu, tables); } -static int smu_v12_0_fini_smc_tables(struct smu_context *smu) +int smu_v12_0_fini_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; - if (!smu_table->tables || smu_table->table_count == 0) + if (!smu_table->tables) return -EINVAL; kfree(smu_table->clocks_table); @@ -304,7 +295,7 @@ static int smu_v12_0_fini_smc_tables(struct smu_context *smu) return 0; } -static int smu_v12_0_populate_smc_tables(struct smu_context *smu) +int smu_v12_0_populate_smc_tables(struct smu_context *smu) { struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = NULL; @@ -319,14 +310,20 @@ static int smu_v12_0_populate_smc_tables(struct smu_context *smu) return smu_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); } -static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, +int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { int ret = 0; - - mutex_lock(&smu->mutex); + uint32_t mclk_mask, soc_mask; if (max) { + ret = smu_get_profiling_clk_mask(smu, AMD_DPM_FORCED_LEVEL_PROFILE_PEAK, + NULL, + &mclk_mask, + &soc_mask); + if (ret) + goto failed; + switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: @@ -340,14 +337,20 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk goto failed; break; case SMU_UCLK: - ret = smu_get_dpm_uclk_limited(smu, max, true); + case SMU_FCLK: + case SMU_MCLK: + ret = smu_get_dpm_clk_limited(smu, clk_type, mclk_mask, max); + if (ret) + goto failed; + break; + case SMU_SOCCLK: + ret = smu_get_dpm_clk_limited(smu, clk_type, soc_mask, max); if (ret) goto failed; break; default: ret = -EINVAL; goto failed; - } } @@ -365,7 +368,14 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk goto failed; break; case SMU_UCLK: - ret = smu_get_dpm_uclk_limited(smu, min, false); + case SMU_FCLK: + case SMU_MCLK: + ret = smu_get_dpm_clk_limited(smu, clk_type, 0, min); + if (ret) + goto failed; + break; + case SMU_SOCCLK: + ret = 
smu_get_dpm_clk_limited(smu, clk_type, 0, min); if (ret) goto failed; break; @@ -373,40 +383,65 @@ static int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk ret = -EINVAL; goto failed; } - } failed: - mutex_unlock(&smu->mutex); return ret; } -static const struct smu_funcs smu_v12_0_funcs = { - .check_fw_status = smu_v12_0_check_fw_status, - .check_fw_version = smu_v12_0_check_fw_version, - .powergate_sdma = smu_v12_0_powergate_sdma, - .powergate_vcn = smu_v12_0_powergate_vcn, - .send_smc_msg = smu_v12_0_send_msg, - .send_smc_msg_with_param = smu_v12_0_send_msg_with_param, - .read_smc_arg = smu_v12_0_read_arg, - .set_gfx_cgpg = smu_v12_0_set_gfx_cgpg, - .gfx_off_control = smu_v12_0_gfx_off_control, - .init_smc_tables = smu_v12_0_init_smc_tables, - .fini_smc_tables = smu_v12_0_fini_smc_tables, - .populate_smc_tables = smu_v12_0_populate_smc_tables, - .get_dpm_ultimate_freq = smu_v12_0_get_dpm_ultimate_freq, -}; - -void smu_v12_0_set_smu_funcs(struct smu_context *smu) +int smu_v12_0_mode2_reset(struct smu_context *smu){ + return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2); +} + +int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max) { - struct amdgpu_device *adev = smu->adev; + int ret = 0; - smu->funcs = &smu_v12_0_funcs; + if (max < min) + return -EINVAL; - switch (adev->asic_type) { - case CHIP_RENOIR: - renoir_set_ppt_funcs(smu); - break; + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min); + if (ret) + return ret; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max); + if (ret) + return ret; + break; + case SMU_FCLK: + case SMU_MCLK: + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min); + if (ret) + return ret; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max); + if (ret) + return ret; + break; + case SMU_SOCCLK: + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min); + if (ret) + return ret; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max); + if (ret) + return ret; + break; + case SMU_VCLK: + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min); + if (ret) + return ret; + + ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max); + if (ret) + return ret; + break; default: - pr_warn("Unknown asic for smu12\n"); + return -EINVAL; } + + return ret; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c index 3f12cf341511..aa0ee2b46135 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu10_smumgr.c @@ -137,7 +137,7 @@ static int smu10_copy_table_from_smc(struct pp_hwmgr *hwmgr, priv->smu_tables.entry[table_id].table_id); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, (uint8_t *)priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c index 4728aa23a818..7dca04a89217 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu8_smumgr.c @@ -177,12 +177,10 @@ static int smu8_load_mec_firmware(struct pp_hwmgr *hwmgr) uint32_t tmp; int ret = 0; struct 
cgs_firmware_info info = {0}; - struct smu8_smumgr *smu8_smu; if (hwmgr == NULL || hwmgr->device == NULL) return -EINVAL; - smu8_smu = hwmgr->smu_backend; ret = cgs_get_firmware_info(hwmgr->device, CGS_UCODE_ID_CP_MEC, &info); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c index 0dbdde69f2d9..0f3836fd9666 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega10_smumgr.c @@ -58,7 +58,7 @@ static int vega10_copy_table_from_smc(struct pp_hwmgr *hwmgr, priv->smu_tables.entry[table_id].table_id); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c index f9589806bf83..90c782c132d2 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega12_smumgr.c @@ -66,7 +66,7 @@ static int vega12_copy_table_from_smc(struct pp_hwmgr *hwmgr, return -EINVAL); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c index b9089c6bea85..f604612f411f 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c @@ -189,7 +189,7 @@ static int vega20_copy_table_from_smc(struct pp_hwmgr *hwmgr, return ret); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, priv->smu_tables.entry[table_id].table, priv->smu_tables.entry[table_id].size); @@ -290,7 +290,7 @@ int vega20_get_activity_monitor_coeff(struct pp_hwmgr *hwmgr, return ret); /* flush hdp cache */ - adev->nbio_funcs->hdp_flush(adev, NULL); + adev->nbio.funcs->hdp_flush(adev, NULL); memcpy(table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].table, priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size); diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c index 92c393f613d3..0b4892833808 100644 --- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c @@ -25,6 +25,7 @@ #include <linux/firmware.h> #include "amdgpu.h" #include "amdgpu_smu.h" +#include "smu_internal.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" #include "smu_v11_0.h" @@ -143,6 +144,7 @@ static struct smu_11_0_cmn2aisc_mapping vega20_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(PrepareMp1ForShutdown), MSG_MAP(SetMGpuFanBoostLimitRpm), MSG_MAP(GetAVFSVoltageByDpm), + MSG_MAP(DFCstateControl), }; static struct smu_11_0_cmn2aisc_mapping vega20_clk_map[SMU_CLK_COUNT] = { @@ -464,7 +466,6 @@ static int vega20_store_powerplay_table(struct smu_context *smu) sizeof(PPTable_t)); table_context->thermal_controller_type = powerplay_table->ucThermalControllerType; - table_context->TDPODLimit = le32_to_cpu(powerplay_table->OverDrive8Table.ODSettingsMax[ATOM_VEGA20_ODSETTING_POWERPERCENTAGE]); return 0; } @@ -634,7 +635,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu) !smu_dpm_ctx->dpm_current_power_state) return -EINVAL; - 
mutex_lock(&(smu->mutex)); switch (smu_dpm_ctx->dpm_current_power_state->classification.ui_label) { case SMU_STATE_UI_LABEL_BATTERY: pm_type = POWER_STATE_TYPE_BATTERY; @@ -652,7 +652,6 @@ amd_pm_state_type vega20_get_current_power_state(struct smu_context *smu) pm_type = POWER_STATE_TYPE_DEFAULT; break; } - mutex_unlock(&(smu->mutex)); return pm_type; } @@ -1274,16 +1273,8 @@ static int vega20_force_clk_levels(struct smu_context *smu, struct vega20_dpm_table *dpm_table; struct vega20_single_dpm_table *single_dpm_table; uint32_t soft_min_level, soft_max_level, hard_min_level; - struct smu_dpm_context *smu_dpm = &smu->smu_dpm; int ret = 0; - if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { - pr_info("force clock level is for dpm manual mode only.\n"); - return -EINVAL; - } - - mutex_lock(&(smu->mutex)); - soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_max_level = mask ? (fls(mask) - 1) : 0; @@ -1436,7 +1427,6 @@ static int vega20_force_clk_levels(struct smu_context *smu, break; } - mutex_unlock(&(smu->mutex)); return ret; } @@ -1451,8 +1441,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu, dpm_table = smu_dpm->dpm_context; - mutex_lock(&smu->mutex); - switch (clk_type) { case SMU_GFXCLK: single_dpm_table = &(dpm_table->gfx_table); @@ -1474,7 +1462,6 @@ static int vega20_get_clock_by_type_with_latency(struct smu_context *smu, ret = -EINVAL; } - mutex_unlock(&smu->mutex); return ret; } @@ -2260,7 +2247,7 @@ vega20_notify_smc_dispaly_config(struct smu_context *smu) if (smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) { clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10; - if (!smu->funcs->display_clock_voltage_request(smu, &clock_req)) { + if (!smu_v11_0_display_clock_voltage_request(smu, &clock_req)) { if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) { ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetMinDeepSleepDcefclk, @@ -2547,8 +2534,6 @@ static int vega20_set_od_percentage(struct smu_context *smu, int feature_enabled; PPCLK_e clk_id; - mutex_lock(&(smu->mutex)); - dpm_table = smu_dpm->dpm_context; golden_table = smu_dpm->golden_dpm_context; @@ -2598,11 +2583,10 @@ static int vega20_set_od_percentage(struct smu_context *smu, } ret = smu_handle_task(smu, smu_dpm->dpm_level, - AMD_PP_TASK_READJUST_POWER_STATE); + AMD_PP_TASK_READJUST_POWER_STATE, + false); set_od_failed: - mutex_unlock(&(smu->mutex)); - return ret; } @@ -2827,10 +2811,9 @@ static int vega20_odn_edit_dpm_table(struct smu_context *smu, } if (type == PP_OD_COMMIT_DPM_TABLE) { - mutex_lock(&(smu->mutex)); ret = smu_handle_task(smu, smu_dpm->dpm_level, - AMD_PP_TASK_READJUST_POWER_STATE); - mutex_unlock(&(smu->mutex)); + AMD_PP_TASK_READJUST_POWER_STATE, + false); } return ret; @@ -3047,7 +3030,7 @@ static int vega20_read_sensor(struct smu_context *smu, *size = 4; break; default: - ret = smu_smc_read_sensor(smu, sensor, data, size); + ret = smu_v11_0_read_sensor(smu, sensor, data, size); } mutex_unlock(&smu->sensor_lock); @@ -3141,6 +3124,49 @@ static int vega20_get_thermal_temperature_range(struct smu_context *smu, return 0; } +static int vega20_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + uint32_t smu_version; + int ret; + + ret = smu_get_smc_version(smu, NULL, &smu_version); + if (ret) { + pr_err("Failed to get smu version!\n"); + return ret; + } + + /* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */ + if (smu_version < 0x283200) { + pr_err("Df cstate control is 
supported with 40.50 and later SMC fw!\n"); + return -EINVAL; + } + + return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state); +} + +static int vega20_update_pcie_parameters(struct smu_context *smu, + uint32_t pcie_gen_cap, + uint32_t pcie_width_cap) +{ + PPTable_t *pptable = smu->smu_table.driver_pptable; + int ret, i; + uint32_t smu_pcie_arg; + + for (i = 0; i < NUM_LINK_LEVELS; i++) { + smu_pcie_arg = (i << 16) | + ((pptable->PcieGenSpeed[i] <= pcie_gen_cap) ? (pptable->PcieGenSpeed[i] << 8) : + (pcie_gen_cap << 8)) | ((pptable->PcieLaneCount[i] <= pcie_width_cap) ? + pptable->PcieLaneCount[i] : pcie_width_cap); + ret = smu_send_smc_msg_with_param(smu, + SMU_MSG_OverridePcieParameters, + smu_pcie_arg); + } + + return ret; +} + + static const struct pptable_funcs vega20_ppt_funcs = { .tables_init = vega20_tables_init, .alloc_dpm_context = vega20_allocate_dpm_context, @@ -3153,7 +3179,7 @@ static const struct pptable_funcs vega20_ppt_funcs = { .get_smu_table_index = vega20_get_smu_table_index, .get_smu_power_index = vega20_get_pwr_src_index, .get_workload_type = vega20_get_workload_type, - .run_afll_btc = vega20_run_btc_afll, + .run_btc = vega20_run_btc_afll, .get_allowed_feature_mask = vega20_get_allowed_feature_mask, .get_current_power_state = vega20_get_current_power_state, .set_default_dpm_table = vega20_set_default_dpm_table, @@ -3183,13 +3209,61 @@ static const struct pptable_funcs vega20_ppt_funcs = { .get_fan_speed_percent = vega20_get_fan_speed_percent, .get_fan_speed_rpm = vega20_get_fan_speed_rpm, .set_watermarks_table = vega20_set_watermarks_table, - .get_thermal_temperature_range = vega20_get_thermal_temperature_range + .get_thermal_temperature_range = vega20_get_thermal_temperature_range, + .set_df_cstate = vega20_set_df_cstate, + .update_pcie_parameters = vega20_update_pcie_parameters, + .init_microcode = smu_v11_0_init_microcode, + .load_microcode = smu_v11_0_load_microcode, + .init_smc_tables = smu_v11_0_init_smc_tables, + .fini_smc_tables = smu_v11_0_fini_smc_tables, + .init_power = smu_v11_0_init_power, + .fini_power = smu_v11_0_fini_power, + .check_fw_status = smu_v11_0_check_fw_status, + .setup_pptable = smu_v11_0_setup_pptable, + .get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values, + .get_clk_info_from_vbios = smu_v11_0_get_clk_info_from_vbios, + .check_pptable = smu_v11_0_check_pptable, + .parse_pptable = smu_v11_0_parse_pptable, + .populate_smc_tables = smu_v11_0_populate_smc_pptable, + .check_fw_version = smu_v11_0_check_fw_version, + .write_pptable = smu_v11_0_write_pptable, + .set_min_dcef_deep_sleep = smu_v11_0_set_min_dcef_deep_sleep, + .set_tool_table_location = smu_v11_0_set_tool_table_location, + .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, + .system_features_control = smu_v11_0_system_features_control, + .send_smc_msg = smu_v11_0_send_msg, + .send_smc_msg_with_param = smu_v11_0_send_msg_with_param, + .read_smc_arg = smu_v11_0_read_arg, + .init_display_count = smu_v11_0_init_display_count, + .set_allowed_mask = smu_v11_0_set_allowed_mask, + .get_enabled_mask = smu_v11_0_get_enabled_mask, + .notify_display_change = smu_v11_0_notify_display_change, + .set_power_limit = smu_v11_0_set_power_limit, + .get_current_clk_freq = smu_v11_0_get_current_clk_freq, + .init_max_sustainable_clocks = smu_v11_0_init_max_sustainable_clocks, + .start_thermal_control = smu_v11_0_start_thermal_control, + .stop_thermal_control = smu_v11_0_stop_thermal_control, + .set_deep_sleep_dcefclk = smu_v11_0_set_deep_sleep_dcefclk, + 
.display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, + .get_fan_control_mode = smu_v11_0_get_fan_control_mode, + .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, + .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, + .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, + .gfx_off_control = smu_v11_0_gfx_off_control, + .register_irq_handler = smu_v11_0_register_irq_handler, + .set_azalia_d3_pme = smu_v11_0_set_azalia_d3_pme, + .get_max_sustainable_clocks_by_dc = smu_v11_0_get_max_sustainable_clocks_by_dc, + .baco_is_support= smu_v11_0_baco_is_support, + .baco_get_state = smu_v11_0_baco_get_state, + .baco_set_state = smu_v11_0_baco_set_state, + .baco_reset = smu_v11_0_baco_reset, + .get_dpm_ultimate_freq = smu_v11_0_get_dpm_ultimate_freq, + .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, + .override_pcie_parameters = smu_v11_0_override_pcie_parameters, }; void vega20_set_ppt_funcs(struct smu_context *smu) { - struct smu_table_context *smu_table = &smu->smu_table; - smu->ppt_funcs = &vega20_ppt_funcs; - smu_table->table_count = TABLE_COUNT; } diff --git a/drivers/gpu/drm/arc/arcpgu_drv.c b/drivers/gpu/drm/arc/arcpgu_drv.c index 6b7f791685ec..d6a6692db0ac 100644 --- a/drivers/gpu/drm/arc/arcpgu_drv.c +++ b/drivers/gpu/drm/arc/arcpgu_drv.c @@ -14,6 +14,7 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem_cma_helper.h> #include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include <linux/dma-mapping.h> #include <linux/module.h> @@ -45,7 +46,7 @@ static int arcpgu_load(struct drm_device *drm) { struct platform_device *pdev = to_platform_device(drm->dev); struct arcpgu_drm_private *arcpgu; - struct device_node *encoder_node; + struct device_node *encoder_node = NULL, *endpoint_node = NULL; struct resource *res; int ret; @@ -80,14 +81,23 @@ static int arcpgu_load(struct drm_device *drm) if (arc_pgu_setup_crtc(drm) < 0) return -ENODEV; - /* find the encoder node and initialize it */ - encoder_node = of_parse_phandle(drm->dev->of_node, "encoder-slave", 0); + /* + * There is only one output port inside each device. It is linked with + * encoder endpoint. + */ + endpoint_node = of_graph_get_next_endpoint(pdev->dev.of_node, NULL); + if (endpoint_node) { + encoder_node = of_graph_get_remote_port_parent(endpoint_node); + of_node_put(endpoint_node); + } + if (encoder_node) { ret = arcpgu_drm_hdmi_init(drm, encoder_node); of_node_put(encoder_node); if (ret < 0) return ret; } else { + dev_info(drm->dev, "no encoder found. Assumed virtual LCD on simulation platform\n"); ret = arcpgu_drm_sim_init(drm, NULL); if (ret < 0) return ret; diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c index 98aac743cc26..8fd7094beece 100644 --- a/drivers/gpu/drm/arc/arcpgu_hdmi.c +++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c @@ -5,6 +5,7 @@ * Copyright (C) 2016 Synopsys, Inc. (www.synopsys.com) */ +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_encoder.h> #include <drm/drm_device.h> diff --git a/drivers/gpu/drm/arm/display/Kconfig b/drivers/gpu/drm/arm/display/Kconfig index cec0639e3aa1..e87ff8623076 100644 --- a/drivers/gpu/drm/arm/display/Kconfig +++ b/drivers/gpu/drm/arm/display/Kconfig @@ -12,3 +12,9 @@ config DRM_KOMEDA Processor driver. It supports the D71 variants of the hardware. If compiled as a module it will be called komeda. 
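/*
 * Illustrative aside, not part of the patch: the arcpgu_drv.c change above
 * drops the "encoder-slave" phandle lookup and instead walks the OF graph --
 * take the device's first endpoint, resolve its remote port parent, and treat
 * that node as the encoder. A minimal sketch of the lookup (requires
 * <linux/of_graph.h>); the helper name is made up:
 */
static struct device_node *find_remote_encoder(struct device_node *dev_node)
{
        struct device_node *endpoint, *remote = NULL;

        endpoint = of_graph_get_next_endpoint(dev_node, NULL);
        if (endpoint) {
                remote = of_graph_get_remote_port_parent(endpoint);
                of_node_put(endpoint);
        }

        return remote;  /* NULL: no encoder described, fall back to the sim/virtual LCD path */
}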
+ +config DRM_KOMEDA_ERROR_PRINT + bool "Enable komeda error print" + depends on DRM_KOMEDA + help + Choose this option to enable error printing. diff --git a/drivers/gpu/drm/arm/display/komeda/Makefile b/drivers/gpu/drm/arm/display/komeda/Makefile index 5c3900c2e764..f095a1c68ac7 100644 --- a/drivers/gpu/drm/arm/display/komeda/Makefile +++ b/drivers/gpu/drm/arm/display/komeda/Makefile @@ -22,4 +22,6 @@ komeda-y += \ d71/d71_dev.o \ d71/d71_component.o +komeda-$(CONFIG_DRM_KOMEDA_ERROR_PRINT) += komeda_event.o + obj-$(CONFIG_DRM_KOMEDA) += komeda.o diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c index 55a8cc94808a..f0ba26e282c3 100644 --- a/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_component.c @@ -106,6 +106,23 @@ static void dump_block_header(struct seq_file *sf, void __iomem *reg) i, hdr.output_ids[i]); } +/* On D71, we are using the global line size. From D32, every component have + * a line size register to indicate the fifo size. + */ +static u32 __get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg, + u32 max_default) +{ + if (!d71->periph_addr) + max_default = malidp_read32(reg, BLK_MAX_LINE_SIZE); + + return max_default; +} + +static u32 get_blk_line_size(struct d71_dev *d71, u32 __iomem *reg) +{ + return __get_blk_line_size(d71, reg, d71->max_line_size); +} + static u32 to_rot_ctrl(u32 rot) { u32 lr_ctrl = 0; @@ -332,7 +349,56 @@ static void d71_layer_dump(struct komeda_component *c, struct seq_file *sf) seq_printf(sf, "%sAD_V_CROP:\t\t0x%X\n", prefix, v[2]); } +static int d71_layer_validate(struct komeda_component *c, + struct komeda_component_state *state) +{ + struct komeda_layer_state *st = to_layer_st(state); + struct komeda_layer *layer = to_layer(c); + struct drm_plane_state *plane_st; + struct drm_framebuffer *fb; + u32 fourcc, line_sz, max_line_sz; + + plane_st = drm_atomic_get_new_plane_state(state->obj.state, + state->plane); + fb = plane_st->fb; + fourcc = fb->format->format; + + if (drm_rotation_90_or_270(st->rot)) + line_sz = st->vsize - st->afbc_crop_t - st->afbc_crop_b; + else + line_sz = st->hsize - st->afbc_crop_l - st->afbc_crop_r; + + if (fb->modifier) { + if ((fb->modifier & AFBC_FORMAT_MOD_BLOCK_SIZE_MASK) == + AFBC_FORMAT_MOD_BLOCK_SIZE_32x8) + max_line_sz = layer->line_sz; + else + max_line_sz = layer->line_sz / 2; + + if (line_sz > max_line_sz) { + DRM_DEBUG_ATOMIC("afbc request line_sz: %d exceed the max afbc line_sz: %d.\n", + line_sz, max_line_sz); + return -EINVAL; + } + } + + if (fourcc == DRM_FORMAT_YUV420_10BIT && line_sz > 2046 && (st->afbc_crop_l % 4)) { + DRM_DEBUG_ATOMIC("YUV420_10BIT input_hsize: %d exceed the max size 2046.\n", + line_sz); + return -EINVAL; + } + + if (fourcc == DRM_FORMAT_X0L2 && line_sz > 2046 && (st->addr[0] % 16)) { + DRM_DEBUG_ATOMIC("X0L2 input_hsize: %d exceed the max size 2046.\n", + line_sz); + return -EINVAL; + } + + return 0; +} + static const struct komeda_component_funcs d71_layer_funcs = { + .validate = d71_layer_validate, .update = d71_layer_update, .disable = d71_layer_disable, .dump_register = d71_layer_dump, @@ -365,7 +431,28 @@ static int d71_layer_init(struct d71_dev *d71, else layer->layer_type = KOMEDA_FMT_SIMPLE_LAYER; - set_range(&layer->hsize_in, 4, d71->max_line_size); + if (!d71->periph_addr) { + /* D32 or newer product */ + layer->line_sz = malidp_read32(reg, BLK_MAX_LINE_SIZE); + layer->yuv_line_sz = L_INFO_YUV_MAX_LINESZ(layer_info); + } else if 
(d71->max_line_size > 2048) { + /* D71 4K */ + layer->line_sz = d71->max_line_size; + layer->yuv_line_sz = layer->line_sz / 2; + } else { + /* D71 2K */ + if (layer->layer_type == KOMEDA_FMT_RICH_LAYER) { + /* rich layer is 4K configuration */ + layer->line_sz = d71->max_line_size * 2; + layer->yuv_line_sz = layer->line_sz / 2; + } else { + layer->line_sz = d71->max_line_size; + layer->yuv_line_sz = 0; + } + } + + set_range(&layer->hsize_in, 4, layer->line_sz); + set_range(&layer->vsize_in, 4, d71->max_vsize); malidp_write32(reg, LAYER_PALPHA, D71_PALPHA_DEF_MAP); @@ -456,9 +543,11 @@ static int d71_wb_layer_init(struct d71_dev *d71, wb_layer = to_layer(c); wb_layer->layer_type = KOMEDA_FMT_WB_LAYER; + wb_layer->line_sz = get_blk_line_size(d71, reg); + wb_layer->yuv_line_sz = wb_layer->line_sz; - set_range(&wb_layer->hsize_in, D71_MIN_LINE_SIZE, d71->max_line_size); - set_range(&wb_layer->vsize_in, D71_MIN_VERTICAL_SIZE, d71->max_vsize); + set_range(&wb_layer->hsize_in, 64, wb_layer->line_sz); + set_range(&wb_layer->vsize_in, 64, d71->max_vsize); return 0; } @@ -595,8 +684,8 @@ static int d71_compiz_init(struct d71_dev *d71, compiz = to_compiz(c); - set_range(&compiz->hsize, D71_MIN_LINE_SIZE, d71->max_line_size); - set_range(&compiz->vsize, D71_MIN_VERTICAL_SIZE, d71->max_vsize); + set_range(&compiz->hsize, 64, get_blk_line_size(d71, reg)); + set_range(&compiz->vsize, 64, d71->max_vsize); return 0; } @@ -703,7 +792,7 @@ static void d71_scaler_update(struct komeda_component *c, static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf) { - u32 v[9]; + u32 v[10]; dump_block_header(sf, c->reg); @@ -723,6 +812,18 @@ static void d71_scaler_dump(struct komeda_component *c, struct seq_file *sf) seq_printf(sf, "SC_H_DELTA_PH:\t\t0x%X\n", v[6]); seq_printf(sf, "SC_V_INIT_PH:\t\t0x%X\n", v[7]); seq_printf(sf, "SC_V_DELTA_PH:\t\t0x%X\n", v[8]); + + get_values_from_reg(c->reg, 0x130, 10, v); + seq_printf(sf, "SC_ENH_LIMITS:\t\t0x%X\n", v[0]); + seq_printf(sf, "SC_ENH_COEFF0:\t\t0x%X\n", v[1]); + seq_printf(sf, "SC_ENH_COEFF1:\t\t0x%X\n", v[2]); + seq_printf(sf, "SC_ENH_COEFF2:\t\t0x%X\n", v[3]); + seq_printf(sf, "SC_ENH_COEFF3:\t\t0x%X\n", v[4]); + seq_printf(sf, "SC_ENH_COEFF4:\t\t0x%X\n", v[5]); + seq_printf(sf, "SC_ENH_COEFF5:\t\t0x%X\n", v[6]); + seq_printf(sf, "SC_ENH_COEFF6:\t\t0x%X\n", v[7]); + seq_printf(sf, "SC_ENH_COEFF7:\t\t0x%X\n", v[8]); + seq_printf(sf, "SC_ENH_COEFF8:\t\t0x%X\n", v[9]); } static const struct komeda_component_funcs d71_scaler_funcs = { @@ -753,7 +854,7 @@ static int d71_scaler_init(struct d71_dev *d71, } scaler = to_scaler(c); - set_range(&scaler->hsize, 4, 2048); + set_range(&scaler->hsize, 4, __get_blk_line_size(d71, reg, 2048)); set_range(&scaler->vsize, 4, 4096); scaler->max_downscaling = 6; scaler->max_upscaling = 64; @@ -862,7 +963,7 @@ static int d71_splitter_init(struct d71_dev *d71, splitter = to_splitter(c); - set_range(&splitter->hsize, 4, d71->max_line_size); + set_range(&splitter->hsize, 4, get_blk_line_size(d71, reg)); set_range(&splitter->vsize, 4, d71->max_vsize); return 0; @@ -933,7 +1034,8 @@ static int d71_merger_init(struct d71_dev *d71, merger = to_merger(c); - set_range(&merger->hsize_merged, 4, 4032); + set_range(&merger->hsize_merged, 4, + __get_blk_line_size(d71, reg, 4032)); set_range(&merger->vsize_merged, 4, 4096); return 0; @@ -944,13 +1046,26 @@ static void d71_improc_update(struct komeda_component *c, { struct komeda_improc_state *st = to_improc_st(state); u32 __iomem *reg = c->reg; - u32 index; + u32 index, mask = 0, 
ctrl = 0; for_each_changed_input(state, index) malidp_write32(reg, BLK_INPUT_ID0 + index * 4, to_d71_input_id(state, index)); malidp_write32(reg, BLK_SIZE, HV_SIZE(st->hsize, st->vsize)); + malidp_write32(reg, IPS_DEPTH, st->color_depth); + + mask |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420; + + /* config color format */ + if (st->color_format == DRM_COLOR_FORMAT_YCRCB420) + ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422 | IPS_CTRL_CHD420; + else if (st->color_format == DRM_COLOR_FORMAT_YCRCB422) + ctrl |= IPS_CTRL_YUV | IPS_CTRL_CHD422; + else if (st->color_format == DRM_COLOR_FORMAT_YCRCB444) + ctrl |= IPS_CTRL_YUV; + + malidp_write32_mask(reg, BLK_CONTROL, mask, ctrl); } static void d71_improc_dump(struct komeda_component *c, struct seq_file *sf) @@ -1218,6 +1333,90 @@ int d71_probe_block(struct d71_dev *d71, return err; } +static void d71_gcu_dump(struct d71_dev *d71, struct seq_file *sf) +{ + u32 v[5]; + + seq_puts(sf, "\n------ GCU ------\n"); + + get_values_from_reg(d71->gcu_addr, 0, 3, v); + seq_printf(sf, "GLB_ARCH_ID:\t\t0x%X\n", v[0]); + seq_printf(sf, "GLB_CORE_ID:\t\t0x%X\n", v[1]); + seq_printf(sf, "GLB_CORE_INFO:\t\t0x%X\n", v[2]); + + get_values_from_reg(d71->gcu_addr, 0x10, 1, v); + seq_printf(sf, "GLB_IRQ_STATUS:\t\t0x%X\n", v[0]); + + get_values_from_reg(d71->gcu_addr, 0xA0, 5, v); + seq_printf(sf, "GCU_IRQ_RAW_STATUS:\t0x%X\n", v[0]); + seq_printf(sf, "GCU_IRQ_CLEAR:\t\t0x%X\n", v[1]); + seq_printf(sf, "GCU_IRQ_MASK:\t\t0x%X\n", v[2]); + seq_printf(sf, "GCU_IRQ_STATUS:\t\t0x%X\n", v[3]); + seq_printf(sf, "GCU_STATUS:\t\t0x%X\n", v[4]); + + get_values_from_reg(d71->gcu_addr, 0xD0, 3, v); + seq_printf(sf, "GCU_CONTROL:\t\t0x%X\n", v[0]); + seq_printf(sf, "GCU_CONFIG_VALID0:\t0x%X\n", v[1]); + seq_printf(sf, "GCU_CONFIG_VALID1:\t0x%X\n", v[2]); +} + +static void d71_lpu_dump(struct d71_pipeline *pipe, struct seq_file *sf) +{ + u32 v[6]; + + seq_printf(sf, "\n------ LPU%d ------\n", pipe->base.id); + + dump_block_header(sf, pipe->lpu_addr); + + get_values_from_reg(pipe->lpu_addr, 0xA0, 6, v); + seq_printf(sf, "LPU_IRQ_RAW_STATUS:\t0x%X\n", v[0]); + seq_printf(sf, "LPU_IRQ_CLEAR:\t\t0x%X\n", v[1]); + seq_printf(sf, "LPU_IRQ_MASK:\t\t0x%X\n", v[2]); + seq_printf(sf, "LPU_IRQ_STATUS:\t\t0x%X\n", v[3]); + seq_printf(sf, "LPU_STATUS:\t\t0x%X\n", v[4]); + seq_printf(sf, "LPU_TBU_STATUS:\t\t0x%X\n", v[5]); + + get_values_from_reg(pipe->lpu_addr, 0xC0, 1, v); + seq_printf(sf, "LPU_INFO:\t\t0x%X\n", v[0]); + + get_values_from_reg(pipe->lpu_addr, 0xD0, 3, v); + seq_printf(sf, "LPU_RAXI_CONTROL:\t0x%X\n", v[0]); + seq_printf(sf, "LPU_WAXI_CONTROL:\t0x%X\n", v[1]); + seq_printf(sf, "LPU_TBU_CONTROL:\t0x%X\n", v[2]); +} + +static void d71_dou_dump(struct d71_pipeline *pipe, struct seq_file *sf) +{ + u32 v[5]; + + seq_printf(sf, "\n------ DOU%d ------\n", pipe->base.id); + + dump_block_header(sf, pipe->dou_addr); + + get_values_from_reg(pipe->dou_addr, 0xA0, 5, v); + seq_printf(sf, "DOU_IRQ_RAW_STATUS:\t0x%X\n", v[0]); + seq_printf(sf, "DOU_IRQ_CLEAR:\t\t0x%X\n", v[1]); + seq_printf(sf, "DOU_IRQ_MASK:\t\t0x%X\n", v[2]); + seq_printf(sf, "DOU_IRQ_STATUS:\t\t0x%X\n", v[3]); + seq_printf(sf, "DOU_STATUS:\t\t0x%X\n", v[4]); +} + +static void d71_pipeline_dump(struct komeda_pipeline *pipe, struct seq_file *sf) +{ + struct d71_pipeline *d71_pipe = to_d71_pipeline(pipe); + + d71_lpu_dump(d71_pipe, sf); + d71_dou_dump(d71_pipe, sf); +} + const struct komeda_pipeline_funcs d71_pipeline_funcs = { - .downscaling_clk_check = d71_downscaling_clk_check, + .downscaling_clk_check = 
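/*
 * Illustrative aside, not part of the patch: d71_improc_update() above maps
 * the output colour format onto the IPS control bits cumulatively -- YCbCr
 * 4:4:4 sets only IPS_CTRL_YUV, 4:2:2 adds IPS_CTRL_CHD422, and 4:2:0 adds
 * IPS_CTRL_CHD420 on top; RGB leaves all three clear. The read-modify-write
 * through malidp_write32_mask() keeps the other BLK_CONTROL bits untouched.
 */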
d71_downscaling_clk_check, + .dump_register = d71_pipeline_dump, }; + +void d71_dump(struct komeda_dev *mdev, struct seq_file *sf) +{ + struct d71_dev *d71 = mdev->chip_data; + + d71_gcu_dump(d71, sf); +} diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c index d567ab7ed314..822b23a1ce75 100644 --- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.c @@ -195,7 +195,7 @@ d71_irq_handler(struct komeda_dev *mdev, struct komeda_events *evts) if (gcu_status & GLB_IRQ_STATUS_PIPE1) evts->pipes[1] |= get_pipeline_event(d71->pipes[1], gcu_status); - return gcu_status ? IRQ_HANDLED : IRQ_NONE; + return IRQ_RETVAL(gcu_status); } #define ENABLED_GCU_IRQS (GCU_IRQ_CVAL0 | GCU_IRQ_CVAL1 | \ @@ -395,6 +395,22 @@ static int d71_enum_resources(struct komeda_dev *mdev) err = PTR_ERR(pipe); goto err_cleanup; } + + /* D71 HW doesn't update shadow registers when display output + * is turning off, so when we disable all pipeline components + * together with display output disable by one flush or one + * operation, the disable operation updated registers will not + * be flush to or valid in HW, which may leads problem. + * To workaround this problem, introduce a two phase disable. + * Phase1: Disabling components with display is on to make sure + * the disable can be flushed to HW. + * Phase2: Only turn-off display output. + */ + value = KOMEDA_PIPELINE_IMPROCS | + BIT(KOMEDA_COMPONENT_TIMING_CTRLR); + + pipe->standalone_disabled_comps = value; + d71->pipes[i] = to_d71_pipeline(pipe); } @@ -561,17 +577,18 @@ static int d71_disconnect_iommu(struct komeda_dev *mdev) } static const struct komeda_dev_funcs d71_chip_funcs = { - .init_format_table = d71_init_fmt_tbl, - .enum_resources = d71_enum_resources, - .cleanup = d71_cleanup, - .irq_handler = d71_irq_handler, - .enable_irq = d71_enable_irq, - .disable_irq = d71_disable_irq, - .on_off_vblank = d71_on_off_vblank, - .change_opmode = d71_change_opmode, - .flush = d71_flush, - .connect_iommu = d71_connect_iommu, - .disconnect_iommu = d71_disconnect_iommu, + .init_format_table = d71_init_fmt_tbl, + .enum_resources = d71_enum_resources, + .cleanup = d71_cleanup, + .irq_handler = d71_irq_handler, + .enable_irq = d71_enable_irq, + .disable_irq = d71_disable_irq, + .on_off_vblank = d71_on_off_vblank, + .change_opmode = d71_change_opmode, + .flush = d71_flush, + .connect_iommu = d71_connect_iommu, + .disconnect_iommu = d71_disconnect_iommu, + .dump_register = d71_dump, }; const struct komeda_dev_funcs * diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h index 84f1878b647d..c7357c2b9e62 100644 --- a/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_dev.h @@ -49,4 +49,6 @@ int d71_probe_block(struct d71_dev *d71, struct block_header *blk, u32 __iomem *reg); void d71_read_block_header(u32 __iomem *reg, struct block_header *blk); +void d71_dump(struct komeda_dev *mdev, struct seq_file *sf); + #endif /* !_D71_DEV_H_ */ diff --git a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h index 2d5e6d00b42c..1727dc993909 100644 --- a/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h +++ b/drivers/gpu/drm/arm/display/komeda/d71/d71_regs.h @@ -10,6 +10,7 @@ /* Common block registers offset */ #define BLK_BLOCK_INFO 0x000 #define BLK_PIPELINE_INFO 0x004 +#define BLK_MAX_LINE_SIZE 0x008 #define BLK_VALID_INPUT_ID0 
0x020 #define BLK_OUTPUT_ID0 0x060 #define BLK_INPUT_ID0 0x080 @@ -321,6 +322,7 @@ #define L_INFO_RF BIT(0) #define L_INFO_CM BIT(1) #define L_INFO_ABUF_SIZE(x) (((x) >> 4) & 0x7) +#define L_INFO_YUV_MAX_LINESZ(x) (((x) >> 16) & 0xFFFF) /* Scaler registers */ #define SC_COEFFTAB 0x0DC @@ -494,13 +496,6 @@ enum d71_blk_type { #define D71_DEFAULT_PREPRETCH_LINE 5 #define D71_BUS_WIDTH_16_BYTES 16 -#define D71_MIN_LINE_SIZE 64 -#define D71_MIN_VERTICAL_SIZE 64 -#define D71_SC_MIN_LIN_SIZE 4 -#define D71_SC_MIN_VERTICAL_SIZE 4 -#define D71_SC_MAX_LIN_SIZE 2048 -#define D71_SC_MAX_VERTICAL_SIZE 4096 - #define D71_SC_MAX_UPSCALING 64 #define D71_SC_MAX_DOWNSCALING 6 #define D71_SC_SPLIT_OVERLAP 8 diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c index 624d257da20f..252015210fbc 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_crtc.c @@ -5,7 +5,6 @@ * */ #include <linux/clk.h> -#include <linux/pm_runtime.h> #include <linux/spinlock.h> #include <drm/drm_atomic.h> @@ -18,6 +17,33 @@ #include "komeda_dev.h" #include "komeda_kms.h" +void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st, + u32 *color_depths, u32 *color_formats) +{ + struct drm_connector *conn; + struct drm_connector_state *conn_st; + u32 conn_color_formats = ~0u; + int i, min_bpc = 31, conn_bpc = 0; + + for_each_new_connector_in_state(crtc_st->state, conn, conn_st, i) { + if (conn_st->crtc != crtc_st->crtc) + continue; + + conn_bpc = conn->display_info.bpc ? conn->display_info.bpc : 8; + conn_color_formats &= conn->display_info.color_formats; + + if (conn_bpc < min_bpc) + min_bpc = conn_bpc; + } + + /* connector doesn't config any color_format, use RGB444 as default */ + if (!conn_color_formats) + conn_color_formats = DRM_COLOR_FORMAT_RGB444; + + *color_depths = GENMASK(min_bpc, 0); + *color_formats = conn_color_formats; +} + static void komeda_crtc_update_clock_ratio(struct komeda_crtc_state *kcrtc_st) { u64 pxlclk, aclk; @@ -250,23 +276,57 @@ komeda_crtc_atomic_enable(struct drm_crtc *crtc, { komeda_crtc_prepare(to_kcrtc(crtc)); drm_crtc_vblank_on(crtc); + WARN_ON(drm_crtc_vblank_get(crtc)); komeda_crtc_do_flush(crtc, old); } static void +komeda_crtc_flush_and_wait_for_flip_done(struct komeda_crtc *kcrtc, + struct completion *input_flip_done) +{ + struct drm_device *drm = kcrtc->base.dev; + struct komeda_dev *mdev = kcrtc->master->mdev; + struct completion *flip_done; + struct completion temp; + int timeout; + + /* if caller doesn't send a flip_done, use a private flip_done */ + if (input_flip_done) { + flip_done = input_flip_done; + } else { + init_completion(&temp); + kcrtc->disable_done = &temp; + flip_done = &temp; + } + + mdev->funcs->flush(mdev, kcrtc->master->id, 0); + + /* wait the flip take affect.*/ + timeout = wait_for_completion_timeout(flip_done, HZ); + if (timeout == 0) { + DRM_ERROR("wait pipe%d flip done timeout\n", kcrtc->master->id); + if (!input_flip_done) { + unsigned long flags; + + spin_lock_irqsave(&drm->event_lock, flags); + kcrtc->disable_done = NULL; + spin_unlock_irqrestore(&drm->event_lock, flags); + } + } +} + +static void komeda_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_crtc_state *old) { struct komeda_crtc *kcrtc = to_kcrtc(crtc); struct komeda_crtc_state *old_st = to_kcrtc_st(old); - struct komeda_dev *mdev = crtc->dev->dev_private; struct komeda_pipeline *master = kcrtc->master; struct komeda_pipeline *slave = kcrtc->slave; - struct completion 
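Editor's note: komeda_crtc_get_color_config() above intersects the capabilities of every connector attached to the CRTC: the smallest reported bpc wins and the color_formats masks are ANDed together, with RGB444 as the fallback when no connector reports a format. An illustrative walkthrough (the connector values below are made up for the example):

	/* conn A (e.g. HDMI):      bpc = 10, formats = RGB444 | YCRCB444
	 * conn B (e.g. writeback): bpc = 8,  formats = RGB444
	 *
	 * result: *color_formats = DRM_COLOR_FORMAT_RGB444   (common subset)
	 *         *color_depths  = GENMASK(8, 0)  (all depths up to min bpc)
	 */
	u32 depths, formats;

	komeda_crtc_get_color_config(crtc_st, &depths, &formats);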
*disable_done = &crtc->state->commit->flip_done; - struct completion temp; - int timeout; + struct completion *disable_done; + bool needs_phase2 = false; - DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x.\n", + DRM_DEBUG_ATOMIC("CRTC%d_DISABLE: active_pipes: 0x%x, affected: 0x%x\n", drm_crtc_index(crtc), old_st->active_pipes, old_st->affected_pipes); @@ -274,7 +334,7 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc, komeda_pipeline_disable(slave, old->state); if (has_bit(master->id, old_st->active_pipes)) - komeda_pipeline_disable(master, old->state); + needs_phase2 = komeda_pipeline_disable(master, old->state); /* crtc_disable has two scenarios according to the state->active switch. * 1. active -> inactive @@ -293,32 +353,23 @@ komeda_crtc_atomic_disable(struct drm_crtc *crtc, * That's also the reason why skip modeset commit in * komeda_crtc_atomic_flush() */ - if (crtc->state->active) { - struct komeda_pipeline_state *pipe_st; - /* clear the old active_comps to zero */ - pipe_st = komeda_pipeline_get_old_state(master, old->state); - pipe_st->active_comps = 0; + disable_done = (needs_phase2 || crtc->state->active) ? + NULL : &crtc->state->commit->flip_done; - init_completion(&temp); - kcrtc->disable_done = &temp; - disable_done = &temp; - } + /* wait phase 1 disable done */ + komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done); - mdev->funcs->flush(mdev, master->id, 0); + /* phase 2 */ + if (needs_phase2) { + komeda_pipeline_disable(kcrtc->master, old->state); - /* wait the disable take affect.*/ - timeout = wait_for_completion_timeout(disable_done, HZ); - if (timeout == 0) { - DRM_ERROR("disable pipeline%d timeout.\n", kcrtc->master->id); - if (crtc->state->active) { - unsigned long flags; + disable_done = crtc->state->active ? 
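Editor's note: assembled from the hunks above and below, the reworked disable path boils down to at most two flush-and-wait cycles; only the final flush is allowed to consume the commit's flip_done, while earlier phases fall back to the private completion inside the helper. A condensed outline, not the literal function body:

	/* phase 1: ordinary components, disabled while the display is on */
	needs_phase2 = komeda_pipeline_disable(master, old->state);
	disable_done = (needs_phase2 || crtc->state->active) ?
		       NULL : &crtc->state->commit->flip_done;
	komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done);

	/* phase 2: only improcs and the timing controller remain */
	if (needs_phase2) {
		komeda_pipeline_disable(master, old->state);
		disable_done = crtc->state->active ?
			       NULL : &crtc->state->commit->flip_done;
		komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done);
	}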
+ NULL : &crtc->state->commit->flip_done; - spin_lock_irqsave(&crtc->dev->event_lock, flags); - kcrtc->disable_done = NULL; - spin_unlock_irqrestore(&crtc->dev->event_lock, flags); - } + komeda_crtc_flush_and_wait_for_flip_done(kcrtc, disable_done); } + drm_crtc_vblank_put(crtc); drm_crtc_vblank_off(crtc); komeda_crtc_unprepare(kcrtc); } diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c index ca64a129c594..937a6d4c4865 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.c @@ -25,6 +25,8 @@ static int komeda_register_show(struct seq_file *sf, void *x) struct komeda_dev *mdev = sf->private; int i; + seq_puts(sf, "\n====== Komeda register dump =========\n"); + if (mdev->funcs->dump_register) mdev->funcs->dump_register(mdev, sf); @@ -91,9 +93,19 @@ config_id_show(struct device *dev, struct device_attribute *attr, char *buf) } static DEVICE_ATTR_RO(config_id); +static ssize_t +aclk_hz_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct komeda_dev *mdev = dev_to_mdev(dev); + + return snprintf(buf, PAGE_SIZE, "%lu\n", clk_get_rate(mdev->aclk)); +} +static DEVICE_ATTR_RO(aclk_hz); + static struct attribute *komeda_sysfs_entries[] = { &dev_attr_core_id.attr, &dev_attr_config_id.attr, + &dev_attr_aclk_hz.attr, NULL, }; @@ -216,7 +228,7 @@ struct komeda_dev *komeda_dev_create(struct device *dev) product->product_id, MALIDP_CORE_ID_PRODUCT_ID(mdev->chip.core_id)); err = -ENODEV; - goto err_cleanup; + goto disable_clk; } DRM_INFO("Found ARM Mali-D%x version r%dp%d\n", @@ -229,19 +241,19 @@ struct komeda_dev *komeda_dev_create(struct device *dev) err = mdev->funcs->enum_resources(mdev); if (err) { DRM_ERROR("enumerate display resource failed.\n"); - goto err_cleanup; + goto disable_clk; } err = komeda_parse_dt(dev, mdev); if (err) { DRM_ERROR("parse device tree failed.\n"); - goto err_cleanup; + goto disable_clk; } err = komeda_assemble_pipelines(mdev); if (err) { DRM_ERROR("assemble display pipelines failed.\n"); - goto err_cleanup; + goto disable_clk; } dev->dma_parms = &mdev->dma_parms; @@ -254,11 +266,14 @@ struct komeda_dev *komeda_dev_create(struct device *dev) if (mdev->iommu && mdev->funcs->connect_iommu) { err = mdev->funcs->connect_iommu(mdev); if (err) { + DRM_ERROR("connect iommu failed.\n"); mdev->iommu = NULL; - goto err_cleanup; + goto disable_clk; } } + clk_disable_unprepare(mdev->aclk); + err = sysfs_create_group(&dev->kobj, &komeda_sysfs_attr_group); if (err) { DRM_ERROR("create sysfs group failed.\n"); @@ -271,6 +286,8 @@ struct komeda_dev *komeda_dev_create(struct device *dev) return mdev; +disable_clk: + clk_disable_unprepare(mdev->aclk); err_cleanup: komeda_dev_destroy(mdev); return ERR_PTR(err); @@ -288,8 +305,12 @@ void komeda_dev_destroy(struct komeda_dev *mdev) debugfs_remove_recursive(mdev->debugfs_root); #endif + if (mdev->aclk) + clk_prepare_enable(mdev->aclk); + if (mdev->iommu && mdev->funcs->disconnect_iommu) - mdev->funcs->disconnect_iommu(mdev); + if (mdev->funcs->disconnect_iommu(mdev)) + DRM_ERROR("disconnect iommu failed.\n"); mdev->iommu = NULL; for (i = 0; i < mdev->n_pipelines; i++) { @@ -317,3 +338,47 @@ void komeda_dev_destroy(struct komeda_dev *mdev) devm_kfree(dev, mdev); } + +int komeda_dev_resume(struct komeda_dev *mdev) +{ + int ret = 0; + + clk_prepare_enable(mdev->aclk); + + if (mdev->iommu && mdev->funcs->connect_iommu) { + ret = mdev->funcs->connect_iommu(mdev); + if (ret < 0) { + DRM_ERROR("connect iommu 
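Editor's note: the new aclk_hz attribute follows the standard read-only sysfs pattern: DEVICE_ATTR_RO(name) pairs with a name_show() callback that formats into the PAGE_SIZE buffer, and the attribute is exported by listing it in komeda_sysfs_entries[]. A sketch of the same pattern for a hypothetical extra attribute (core_rev is invented for illustration; dev_to_mdev() and mdev->chip.core_id do exist in this driver):

	static ssize_t
	core_rev_show(struct device *dev, struct device_attribute *attr, char *buf)
	{
		struct komeda_dev *mdev = dev_to_mdev(dev);

		/* read from userspace with: cat /sys/devices/.../core_rev */
		return snprintf(buf, PAGE_SIZE, "0x%08x\n", mdev->chip.core_id);
	}
	static DEVICE_ATTR_RO(core_rev);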
failed.\n"); + goto disable_clk; + } + } + + ret = mdev->funcs->enable_irq(mdev); + +disable_clk: + clk_disable_unprepare(mdev->aclk); + + return ret; +} + +int komeda_dev_suspend(struct komeda_dev *mdev) +{ + int ret = 0; + + clk_prepare_enable(mdev->aclk); + + if (mdev->iommu && mdev->funcs->disconnect_iommu) { + ret = mdev->funcs->disconnect_iommu(mdev); + if (ret < 0) { + DRM_ERROR("disconnect iommu failed.\n"); + goto disable_clk; + } + } + + ret = mdev->funcs->disable_irq(mdev); + +disable_clk: + clk_disable_unprepare(mdev->aclk); + + return ret; +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h index d1c86b6174c8..414200233b64 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_dev.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_dev.h @@ -40,6 +40,17 @@ #define KOMEDA_ERR_TTNG BIT_ULL(30) #define KOMEDA_ERR_TTF BIT_ULL(31) +#define KOMEDA_ERR_EVENTS \ + (KOMEDA_EVENT_URUN | KOMEDA_EVENT_IBSY | KOMEDA_EVENT_OVR |\ + KOMEDA_ERR_TETO | KOMEDA_ERR_TEMR | KOMEDA_ERR_TITR |\ + KOMEDA_ERR_CPE | KOMEDA_ERR_CFGE | KOMEDA_ERR_AXIE |\ + KOMEDA_ERR_ACE0 | KOMEDA_ERR_ACE1 | KOMEDA_ERR_ACE2 |\ + KOMEDA_ERR_ACE3 | KOMEDA_ERR_DRIFTTO | KOMEDA_ERR_FRAMETO |\ + KOMEDA_ERR_ZME | KOMEDA_ERR_MERR | KOMEDA_ERR_TCF |\ + KOMEDA_ERR_TTNG | KOMEDA_ERR_TTF) + +#define KOMEDA_WARN_EVENTS KOMEDA_ERR_CSCE + /* malidp device id */ enum { MALI_D71 = 0, @@ -207,4 +218,13 @@ void komeda_dev_destroy(struct komeda_dev *mdev); struct komeda_dev *dev_to_mdev(struct device *dev); +#ifdef CONFIG_DRM_KOMEDA_ERROR_PRINT +void komeda_print_events(struct komeda_events *evts); +#else +static inline void komeda_print_events(struct komeda_events *evts) {} +#endif + +int komeda_dev_resume(struct komeda_dev *mdev); +int komeda_dev_suspend(struct komeda_dev *mdev); + #endif /*_KOMEDA_DEV_H_*/ diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c index 69ace6f9055d..d6c2222c5d33 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_drv.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_drv.c @@ -8,6 +8,7 @@ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/component.h> +#include <linux/pm_runtime.h> #include <drm/drm_of.h> #include "komeda_dev.h" #include "komeda_kms.h" @@ -136,13 +137,40 @@ static const struct of_device_id komeda_of_match[] = { MODULE_DEVICE_TABLE(of, komeda_of_match); +static int __maybe_unused komeda_pm_suspend(struct device *dev) +{ + struct komeda_drv *mdrv = dev_get_drvdata(dev); + struct drm_device *drm = &mdrv->kms->base; + int res; + + res = drm_mode_config_helper_suspend(drm); + + komeda_dev_suspend(mdrv->mdev); + + return res; +} + +static int __maybe_unused komeda_pm_resume(struct device *dev) +{ + struct komeda_drv *mdrv = dev_get_drvdata(dev); + struct drm_device *drm = &mdrv->kms->base; + + komeda_dev_resume(mdrv->mdev); + + return drm_mode_config_helper_resume(drm); +} + +static const struct dev_pm_ops komeda_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(komeda_pm_suspend, komeda_pm_resume) +}; + static struct platform_driver komeda_platform_driver = { .probe = komeda_platform_probe, .remove = komeda_platform_remove, .driver = { .name = "komeda", .of_match_table = komeda_of_match, - .pm = NULL, + .pm = &komeda_pm_ops, }, }; diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_event.c b/drivers/gpu/drm/arm/display/komeda/komeda_event.c new file mode 100644 index 000000000000..a36fb86cc054 --- /dev/null +++ b/drivers/gpu/drm/arm/display/komeda/komeda_event.c 
@@ -0,0 +1,140 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * (C) COPYRIGHT 2019 ARM Limited. All rights reserved. + * Author: James.Qian.Wang <james.qian.wang@arm.com> + * + */ +#include <drm/drm_print.h> + +#include "komeda_dev.h" + +struct komeda_str { + char *str; + u32 sz; + u32 len; +}; + +/* return 0 on success, < 0 on no space. + */ +static int komeda_sprintf(struct komeda_str *str, const char *fmt, ...) +{ + va_list args; + int num, free_sz; + int err; + + free_sz = str->sz - str->len - 1; + if (free_sz <= 0) + return -ENOSPC; + + va_start(args, fmt); + + num = vsnprintf(str->str + str->len, free_sz, fmt, args); + + va_end(args); + + if (num < free_sz) { + str->len += num; + err = 0; + } else { + str->len = str->sz - 1; + err = -ENOSPC; + } + + return err; +} + +static void evt_sprintf(struct komeda_str *str, u64 evt, const char *msg) +{ + if (evt) + komeda_sprintf(str, msg); +} + +static void evt_str(struct komeda_str *str, u64 events) +{ + if (events == 0ULL) { + komeda_sprintf(str, "None"); + return; + } + + evt_sprintf(str, events & KOMEDA_EVENT_VSYNC, "VSYNC|"); + evt_sprintf(str, events & KOMEDA_EVENT_FLIP, "FLIP|"); + evt_sprintf(str, events & KOMEDA_EVENT_EOW, "EOW|"); + evt_sprintf(str, events & KOMEDA_EVENT_MODE, "OP-MODE|"); + + evt_sprintf(str, events & KOMEDA_EVENT_URUN, "UNDERRUN|"); + evt_sprintf(str, events & KOMEDA_EVENT_OVR, "OVERRUN|"); + + /* GLB error */ + evt_sprintf(str, events & KOMEDA_ERR_MERR, "MERR|"); + evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|"); + + /* DOU error */ + evt_sprintf(str, events & KOMEDA_ERR_DRIFTTO, "DRIFTTO|"); + evt_sprintf(str, events & KOMEDA_ERR_FRAMETO, "FRAMETO|"); + evt_sprintf(str, events & KOMEDA_ERR_TETO, "TETO|"); + evt_sprintf(str, events & KOMEDA_ERR_CSCE, "CSCE|"); + + /* LPU errors or events */ + evt_sprintf(str, events & KOMEDA_EVENT_IBSY, "IBSY|"); + evt_sprintf(str, events & KOMEDA_ERR_AXIE, "AXIE|"); + evt_sprintf(str, events & KOMEDA_ERR_ACE0, "ACE0|"); + evt_sprintf(str, events & KOMEDA_ERR_ACE1, "ACE1|"); + evt_sprintf(str, events & KOMEDA_ERR_ACE2, "ACE2|"); + evt_sprintf(str, events & KOMEDA_ERR_ACE3, "ACE3|"); + + /* LPU TBU errors*/ + evt_sprintf(str, events & KOMEDA_ERR_TCF, "TCF|"); + evt_sprintf(str, events & KOMEDA_ERR_TTNG, "TTNG|"); + evt_sprintf(str, events & KOMEDA_ERR_TITR, "TITR|"); + evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|"); + evt_sprintf(str, events & KOMEDA_ERR_TTF, "TTF|"); + + /* CU errors*/ + evt_sprintf(str, events & KOMEDA_ERR_CPE, "COPROC|"); + evt_sprintf(str, events & KOMEDA_ERR_ZME, "ZME|"); + evt_sprintf(str, events & KOMEDA_ERR_CFGE, "CFGE|"); + evt_sprintf(str, events & KOMEDA_ERR_TEMR, "TEMR|"); + + if (str->len > 0 && (str->str[str->len - 1] == '|')) { + str->str[str->len - 1] = 0; + str->len--; + } +} + +static bool is_new_frame(struct komeda_events *a) +{ + return (a->pipes[0] | a->pipes[1]) & + (KOMEDA_EVENT_FLIP | KOMEDA_EVENT_EOW); +} + +void komeda_print_events(struct komeda_events *evts) +{ + u64 print_evts = KOMEDA_ERR_EVENTS; + static bool en_print = true; + + /* reduce the same msg print, only print the first evt for one frame */ + if (evts->global || is_new_frame(evts)) + en_print = true; + if (!en_print) + return; + + if ((evts->global | evts->pipes[0] | evts->pipes[1]) & print_evts) { + char msg[256]; + struct komeda_str str; + + str.str = msg; + str.sz = sizeof(msg); + str.len = 0; + + komeda_sprintf(&str, "gcu: "); + evt_str(&str, evts->global); + komeda_sprintf(&str, ", pipes[0]: "); + evt_str(&str, evts->pipes[0]); + komeda_sprintf(&str, ", 
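Editor's note: komeda_sprintf() is a small bounded string builder: it appends into a caller-supplied buffer, never writes past it, and returns -ENOSPC once the buffer fills up, truncating the output instead of overflowing. A usage sketch:

	char buf[32];
	struct komeda_str s = { .str = buf, .sz = sizeof(buf), .len = 0 };

	komeda_sprintf(&s, "pipes[%d]: ", 0);
	komeda_sprintf(&s, "0x%x", 0x840);
	/* buf now holds "pipes[0]: 0x840"; output that does not fit is
	 * truncated and -ENOSPC is returned instead of overflowing buf. */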
pipes[1]: "); + evt_str(&str, evts->pipes[1]); + + DRM_ERROR("err detect: %s\n", msg); + + en_print = false; + } +} diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c index ae274902ff92..52648b4008bc 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c @@ -48,6 +48,8 @@ static irqreturn_t komeda_kms_irq_handler(int irq, void *data) memset(&evts, 0, sizeof(evts)); status = mdev->funcs->irq_handler(mdev, &evts); + komeda_print_events(&evts); + /* Notify the crtc to handle the events */ for (i = 0; i < kms->n_crtcs; i++) komeda_crtc_handle_event(&kms->crtcs[i], &evts); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h index 45c498e15e7a..456f3c435719 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.h @@ -166,6 +166,8 @@ static inline bool has_flip_h(u32 rot) return !!(rotation & DRM_MODE_REFLECT_X); } +void komeda_crtc_get_color_config(struct drm_crtc_state *crtc_st, + u32 *color_depths, u32 *color_formats); unsigned long komeda_crtc_get_aclk(struct komeda_crtc_state *kcrtc_st); int komeda_kms_setup_crtcs(struct komeda_kms_dev *kms, struct komeda_dev *mdev); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h index cf5bea578ad9..bd6ca7c87037 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline.h @@ -227,6 +227,8 @@ struct komeda_layer { /* accepted h/v input range before rotation */ struct malidp_range hsize_in, vsize_in; u32 layer_type; /* RICH, SIMPLE or WB */ + u32 line_sz; + u32 yuv_line_sz; /* maximum line size for YUV422 and YUV420 */ u32 supported_rots; /* komeda supports layer split which splits a whole image to two parts * left and right and handle them by two individual layer processors @@ -323,6 +325,7 @@ struct komeda_improc { struct komeda_improc_state { struct komeda_component_state base; + u8 color_format, color_depth; u16 hsize, vsize; }; @@ -389,6 +392,18 @@ struct komeda_pipeline { int id; /** @avail_comps: available components mask of pipeline */ u32 avail_comps; + /** + * @standalone_disabled_comps: + * + * When disable the pipeline, some components can not be disabled + * together with others, but need a sparated and standalone disable. + * The standalone_disabled_comps are the components which need to be + * disabled standalone, and this concept also introduce concept of + * two phase. + * phase 1: for disabling the common components. + * phase 2: for disabling the standalong_disabled_comps. 
+ */ + u32 standalone_disabled_comps; /** @n_layers: the number of layer on @layers */ int n_layers; /** @layers: the pipeline layers */ @@ -535,7 +550,7 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe, struct komeda_pipeline_state * komeda_pipeline_get_old_state(struct komeda_pipeline *pipe, struct drm_atomic_state *state); -void komeda_pipeline_disable(struct komeda_pipeline *pipe, +bool komeda_pipeline_disable(struct komeda_pipeline *pipe, struct drm_atomic_state *old_state); void komeda_pipeline_update(struct komeda_pipeline *pipe, struct drm_atomic_state *old_state); diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c index b848270e0a1f..52750116aa19 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_pipeline_state.c @@ -285,6 +285,7 @@ komeda_layer_check_cfg(struct komeda_layer *layer, struct komeda_data_flow_cfg *dflow) { u32 src_x, src_y, src_w, src_h; + u32 line_sz, max_line_sz; if (!komeda_fb_is_layer_supported(kfb, layer->layer_type, dflow->rot)) return -EINVAL; @@ -314,6 +315,22 @@ komeda_layer_check_cfg(struct komeda_layer *layer, return -EINVAL; } + if (drm_rotation_90_or_270(dflow->rot)) + line_sz = dflow->in_h; + else + line_sz = dflow->in_w; + + if (kfb->base.format->hsub > 1) + max_line_sz = layer->yuv_line_sz; + else + max_line_sz = layer->line_sz; + + if (line_sz > max_line_sz) { + DRM_DEBUG_ATOMIC("Required line_sz: %d exceeds the max size %d\n", + line_sz, max_line_sz); + return -EINVAL; + } + return 0; } @@ -743,6 +760,7 @@ komeda_improc_validate(struct komeda_improc *improc, struct komeda_data_flow_cfg *dflow) { struct drm_crtc *crtc = kcrtc_st->base.crtc; + struct drm_crtc_state *crtc_st = &kcrtc_st->base; struct komeda_component_state *c_st; struct komeda_improc_state *st; @@ -756,6 +774,34 @@ komeda_improc_validate(struct komeda_improc *improc, st->hsize = dflow->in_w; st->vsize = dflow->in_h; + if (drm_atomic_crtc_needs_modeset(crtc_st)) { + u32 output_depths, output_formats; + u32 avail_depths, avail_formats; + + komeda_crtc_get_color_config(crtc_st, &output_depths, + &output_formats); + + avail_depths = output_depths & improc->supported_color_depths; + if (avail_depths == 0) { + DRM_DEBUG_ATOMIC("No available color depths, conn depths: 0x%x & display: 0x%x\n", + output_depths, + improc->supported_color_depths); + return -EINVAL; + } + + avail_formats = output_formats & + improc->supported_color_formats; + if (!avail_formats) { + DRM_DEBUG_ATOMIC("No available color_formats, conn formats 0x%x & display: 0x%x\n", + output_formats, + improc->supported_color_formats); + return -EINVAL; + } + + st->color_depth = __fls(avail_depths); + st->color_format = BIT(__ffs(avail_formats)); + } + komeda_component_add_input(&st->base, &dflow->input, 0); komeda_component_set_output(&dflow->input, &improc->base, 0); @@ -1218,7 +1264,17 @@ int komeda_release_unclaimed_resources(struct komeda_pipeline *pipe, return 0; } -void komeda_pipeline_disable(struct komeda_pipeline *pipe, +/* Since standalong disabled components must be disabled separately and in the + * last, So a complete disable operation may needs to call pipeline_disable + * twice (two phase disabling). + * Phase 1: disable the common components, flush it. + * Phase 2: disable the standalone disabled components, flush it. + * + * RETURNS: + * true: disable is not complete, needs a phase 2 disable. + * false: disable is complete. 
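Editor's note: the improc negotiation above reduces each intersection mask to a single choice: __fls() (find last set bit) selects the deepest colour depth both sides support, while BIT(__ffs()) keeps only the lowest set format bit, which favours RGB444 whenever it is available since it is the lowest-valued DRM_COLOR_FORMAT_* flag. A worked example with illustrative masks:

	/* connectors allow up to 10 bpc, the improc supports 8- and 10-bit
	 * output, and the formats overlap on RGB444 and YCRCB422 */
	u32 avail_depths  = GENMASK(10, 0) & (BIT(8) | BIT(10));	/* 0x500 */
	u32 avail_formats = DRM_COLOR_FORMAT_RGB444 | DRM_COLOR_FORMAT_YCRCB422;

	st->color_depth  = __fls(avail_depths);		/* 10: deepest common depth */
	st->color_format = BIT(__ffs(avail_formats));	/* lowest set bit: RGB444 */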
+ */ +bool komeda_pipeline_disable(struct komeda_pipeline *pipe, struct drm_atomic_state *old_state) { struct komeda_pipeline_state *old; @@ -1228,9 +1284,14 @@ void komeda_pipeline_disable(struct komeda_pipeline *pipe, old = komeda_pipeline_get_old_state(pipe, old_state); - disabling_comps = old->active_comps; - DRM_DEBUG_ATOMIC("PIPE%d: disabling_comps: 0x%x.\n", - pipe->id, disabling_comps); + disabling_comps = old->active_comps & + (~pipe->standalone_disabled_comps); + if (!disabling_comps) + disabling_comps = old->active_comps & + pipe->standalone_disabled_comps; + + DRM_DEBUG_ATOMIC("PIPE%d: active_comps: 0x%x, disabling_comps: 0x%x.\n", + pipe->id, old->active_comps, disabling_comps); dp_for_each_set_bit(id, disabling_comps) { c = komeda_pipeline_get_component(pipe, id); @@ -1248,6 +1309,13 @@ void komeda_pipeline_disable(struct komeda_pipeline *pipe, c->funcs->disable(c); } + + /* Update the pipeline state, if there are components that are still + * active, return true for calling the phase 2 disable. + */ + old->active_comps &= ~disabling_comps; + + return old->active_comps ? true : false; } void komeda_pipeline_update(struct komeda_pipeline *pipe, diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c index b72840c06ab7..e465cc4879c9 100644 --- a/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c +++ b/drivers/gpu/drm/arm/display/komeda/komeda_wb_connector.c @@ -141,6 +141,7 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms, struct komeda_dev *mdev = kms->base.dev_private; struct komeda_wb_connector *kwb_conn; struct drm_writeback_connector *wb_conn; + struct drm_display_info *info; u32 *formats, n_formats = 0; int err; @@ -172,6 +173,10 @@ static int komeda_wb_connector_add(struct komeda_kms_dev *kms, drm_connector_helper_add(&wb_conn->base, &komeda_wb_conn_helper_funcs); + info = &kwb_conn->base.base.display_info; + info->bpc = __fls(kcrtc->master->improc->supported_color_depths); + info->color_formats = kcrtc->master->improc->supported_color_formats; + kcrtc->wb_conn = kwb_conn; return 0; diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c index 333b88a5efb0..37d92a06318e 100644 --- a/drivers/gpu/drm/arm/malidp_drv.c +++ b/drivers/gpu/drm/arm/malidp_drv.c @@ -368,7 +368,7 @@ malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file, return false; } -struct drm_framebuffer * +static struct drm_framebuffer * malidp_fb_create(struct drm_device *dev, struct drm_file *file, const struct drm_mode_fb_cmd2 *mode_cmd) { @@ -491,9 +491,9 @@ void malidp_error(struct malidp_drm *malidp, spin_unlock_irqrestore(&malidp->errors_lock, irqflags); } -void malidp_error_stats_dump(const char *prefix, - struct malidp_error_stats error_stats, - struct seq_file *m) +static void malidp_error_stats_dump(const char *prefix, + struct malidp_error_stats error_stats, + struct seq_file *m) { seq_printf(m, "[%s] num_errors : %d\n", prefix, error_stats.num_errors); @@ -665,7 +665,7 @@ static ssize_t core_id_show(struct device *dev, struct device_attribute *attr, return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id); } -DEVICE_ATTR_RO(core_id); +static DEVICE_ATTR_RO(core_id); static int malidp_init_sysfs(struct device *dev) { @@ -817,6 +817,12 @@ static int malidp_bind(struct device *dev) malidp->core_id = version; + ret = of_property_read_u32(dev->of_node, + "arm,malidp-arqos-value", + &hwdev->arqos_value); + if (ret) + hwdev->arqos_value = 0x0; + /* set the 
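Editor's note: the mask arithmetic above guarantees that one call never mixes the two groups: while any ordinary component is still active, the standalone set (improcs plus the timing controller) is left untouched and the function returns true to request a second pass. Walking through a pipeline whose old state had a layer, the compositor, an improc and the timing controller active:

	/* pass 1: disabling_comps = active & ~standalone -> layer | compositor
	 *         active_comps keeps improc | timing_ctrlr, returns true
	 *         (the CRTC flushes and waits before calling again)
	 * pass 2: disabling_comps = active & standalone  -> improc | timing_ctrlr
	 *         active_comps becomes 0, returns false
	 */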
number of lines used for output of RGB data */ ret = of_property_read_u8_array(dev->of_node, "arm,malidp-output-port-lines", diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c index bd8265f02e0b..ca570b135478 100644 --- a/drivers/gpu/drm/arm/malidp_hw.c +++ b/drivers/gpu/drm/arm/malidp_hw.c @@ -379,6 +379,15 @@ static void malidp500_modeset(struct malidp_hw_device *hwdev, struct videomode * malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); else malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_ILACED, MALIDP_DE_DISPLAY_FUNC); + + /* + * Program the RQoS register to avoid high resolutions flicker + * issue on the LS1028A. + */ + if (hwdev->arqos_value) { + val = hwdev->arqos_value; + malidp_hw_setbits(hwdev, val, MALIDP500_RQOS_QUALITY); + } } int malidp_format_get_bpp(u32 fmt) diff --git a/drivers/gpu/drm/arm/malidp_hw.h b/drivers/gpu/drm/arm/malidp_hw.h index 968a65eed371..e4c36bc90bda 100644 --- a/drivers/gpu/drm/arm/malidp_hw.h +++ b/drivers/gpu/drm/arm/malidp_hw.h @@ -251,6 +251,9 @@ struct malidp_hw_device { /* size of memory used for rotating layers, up to two banks available */ u32 rotation_memory[2]; + + /* priority level of RQOS register used for driven the ARQOS signal */ + u32 arqos_value; }; static inline u32 malidp_hw_read(struct malidp_hw_device *hwdev, u32 reg) diff --git a/drivers/gpu/drm/arm/malidp_regs.h b/drivers/gpu/drm/arm/malidp_regs.h index 993031542fa1..514c50dcb74d 100644 --- a/drivers/gpu/drm/arm/malidp_regs.h +++ b/drivers/gpu/drm/arm/malidp_regs.h @@ -210,6 +210,16 @@ #define MALIDP500_CONFIG_VALID 0x00f00 #define MALIDP500_CONFIG_ID 0x00fd4 +/* + * The quality of service (QoS) register on the DP500. RQOS register values + * are driven by the ARQOS signal, using AXI transacations, dependent on the + * FIFO input level. + * The RQOS register can also set QoS levels for: + * - RED_ARQOS @ A 4-bit signal value for close to underflow conditions + * - GREEN_ARQOS @ A 4-bit signal value for normal conditions + */ +#define MALIDP500_RQOS_QUALITY 0x00500 + /* register offsets and bits specific to DP550/DP650 */ #define MALIDP550_ADDR_SPACE_SIZE 0x10000 #define MALIDP550_DE_CONTROL 0x00010 diff --git a/drivers/gpu/drm/ast/Kconfig b/drivers/gpu/drm/ast/Kconfig index 829620d5326c..fbcf2f45cef5 100644 --- a/drivers/gpu/drm/ast/Kconfig +++ b/drivers/gpu/drm/ast/Kconfig @@ -4,6 +4,8 @@ config DRM_AST depends on DRM && PCI && MMU select DRM_KMS_HELPER select DRM_VRAM_HELPER + select DRM_TTM + select DRM_TTM_HELPER help Say yes for experimental AST GPU driver. 
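Editor's note: the DP500 QoS tweak is entirely opt-in: the optional arm,malidp-arqos-value property is read at bind time and defaults to zero, and malidp500_modeset() only touches MALIDP500_RQOS_QUALITY when the value is non-zero, so boards that do not need the LS1028A flicker workaround are unaffected. The two hunks combine to roughly this flow (condensed here for readability):

	/* bind: optional property, absent -> 0 */
	if (of_property_read_u32(dev->of_node, "arm,malidp-arqos-value",
				 &hwdev->arqos_value))
		hwdev->arqos_value = 0;

	/* modeset on DP500: program the QoS levels only when configured */
	if (hwdev->arqos_value)
		malidp_hw_setbits(hwdev, hwdev->arqos_value,
				  MALIDP500_RQOS_QUALITY);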
Do not enable this driver without having a working -modesetting, diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c index 6ed6ff49efc0..1f17794b0890 100644 --- a/drivers/gpu/drm/ast/ast_drv.c +++ b/drivers/gpu/drm/ast/ast_drv.c @@ -35,7 +35,6 @@ #include <drm/drm_gem_vram_helper.h> #include <drm/drm_pci.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_vram_mm_helper.h> #include "ast_drv.h" @@ -201,10 +200,7 @@ static struct pci_driver ast_pci_driver = { .driver.pm = &ast_pm_ops, }; -static const struct file_operations ast_fops = { - .owner = THIS_MODULE, - DRM_VRAM_MM_FILE_OPERATIONS -}; +DEFINE_DRM_GEM_FOPS(ast_fops); static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM, diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index 244cc7c382af..ff161bd622f3 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -82,6 +82,25 @@ enum ast_tx_chip { #define AST_DRAM_4Gx16 7 #define AST_DRAM_8Gx16 8 + +#define AST_MAX_HWC_WIDTH 64 +#define AST_MAX_HWC_HEIGHT 64 + +#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH * AST_MAX_HWC_HEIGHT * 2) +#define AST_HWC_SIGNATURE_SIZE 32 + +#define AST_DEFAULT_HWC_NUM 2 + +/* define for signature structure */ +#define AST_HWC_SIGNATURE_CHECKSUM 0x00 +#define AST_HWC_SIGNATURE_SizeX 0x04 +#define AST_HWC_SIGNATURE_SizeY 0x08 +#define AST_HWC_SIGNATURE_X 0x0C +#define AST_HWC_SIGNATURE_Y 0x10 +#define AST_HWC_SIGNATURE_HOTSPOTX 0x14 +#define AST_HWC_SIGNATURE_HOTSPOTY 0x18 + + struct ast_private { struct drm_device *dev; @@ -97,8 +116,11 @@ struct ast_private { int fb_mtrr; - struct drm_gem_object *cursor_cache; - int next_cursor; + struct { + struct drm_gem_vram_object *gbo[AST_DEFAULT_HWC_NUM]; + unsigned int next_index; + } cursor; + bool support_wide_screen; enum { ast_use_p2a, @@ -199,23 +221,6 @@ static inline void ast_open_key(struct ast_private *ast) #define AST_VIDMEM_DEFAULT_SIZE AST_VIDMEM_SIZE_8M -#define AST_MAX_HWC_WIDTH 64 -#define AST_MAX_HWC_HEIGHT 64 - -#define AST_HWC_SIZE (AST_MAX_HWC_WIDTH*AST_MAX_HWC_HEIGHT*2) -#define AST_HWC_SIGNATURE_SIZE 32 - -#define AST_DEFAULT_HWC_NUM 2 -/* define for signature structure */ -#define AST_HWC_SIGNATURE_CHECKSUM 0x00 -#define AST_HWC_SIGNATURE_SizeX 0x04 -#define AST_HWC_SIGNATURE_SizeY 0x08 -#define AST_HWC_SIGNATURE_X 0x0C -#define AST_HWC_SIGNATURE_Y 0x10 -#define AST_HWC_SIGNATURE_HOTSPOTX 0x14 -#define AST_HWC_SIGNATURE_HOTSPOTY 0x18 - - struct ast_i2c_chan { struct i2c_adapter adapter; struct drm_device *dev; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 50de8e47659c..21715d6a9b56 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -33,7 +33,6 @@ #include <drm/drm_gem.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_gem_vram_helper.h> -#include <drm/drm_vram_mm_helper.h> #include "ast_drv.h" diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index d349c721501c..b13eaa2619ab 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -687,17 +687,6 @@ static void ast_encoder_destroy(struct drm_encoder *encoder) kfree(encoder); } - -static struct drm_encoder *ast_best_single_encoder(struct drm_connector *connector) -{ - int enc_id = connector->encoder_ids[0]; - /* pick the encoder ids */ - if (enc_id) - return drm_encoder_find(connector->dev, NULL, enc_id); - return NULL; -} - - static const struct drm_encoder_funcs ast_enc_funcs = { .destroy = ast_encoder_destroy, }; 
@@ -847,7 +836,6 @@ static void ast_connector_destroy(struct drm_connector *connector) static const struct drm_connector_helper_funcs ast_connector_helper_funcs = { .mode_valid = ast_mode_valid, .get_modes = ast_get_modes, - .best_encoder = ast_best_single_encoder, }; static const struct drm_connector_funcs ast_connector_funcs = { @@ -895,50 +883,53 @@ static int ast_connector_init(struct drm_device *dev) static int ast_cursor_init(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; - int size; - int ret; - struct drm_gem_object *obj; + size_t size, i; struct drm_gem_vram_object *gbo; - s64 gpu_addr; - void *base; + int ret; - size = (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE) * AST_DEFAULT_HWC_NUM; + size = roundup(AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE, PAGE_SIZE); - ret = ast_gem_create(dev, size, true, &obj); - if (ret) - return ret; - gbo = drm_gem_vram_of_gem(obj); - ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); - if (ret) - goto fail; - gpu_addr = drm_gem_vram_offset(gbo); - if (gpu_addr < 0) { - drm_gem_vram_unpin(gbo); - ret = (int)gpu_addr; - goto fail; - } + for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) { + gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, + size, 0, false); + if (IS_ERR(gbo)) { + ret = PTR_ERR(gbo); + goto err_drm_gem_vram_put; + } + ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM | + DRM_GEM_VRAM_PL_FLAG_TOPDOWN); + if (ret) { + drm_gem_vram_put(gbo); + goto err_drm_gem_vram_put; + } - /* kmap the object */ - base = drm_gem_vram_kmap(gbo, true, NULL); - if (IS_ERR(base)) { - ret = PTR_ERR(base); - goto fail; + ast->cursor.gbo[i] = gbo; } - ast->cursor_cache = obj; return 0; -fail: + +err_drm_gem_vram_put: + while (i) { + --i; + gbo = ast->cursor.gbo[i]; + drm_gem_vram_unpin(gbo); + drm_gem_vram_put(gbo); + ast->cursor.gbo[i] = NULL; + } return ret; } static void ast_cursor_fini(struct drm_device *dev) { struct ast_private *ast = dev->dev_private; - struct drm_gem_vram_object *gbo = - drm_gem_vram_of_gem(ast->cursor_cache); - drm_gem_vram_kunmap(gbo); - drm_gem_vram_unpin(gbo); - drm_gem_object_put_unlocked(ast->cursor_cache); + size_t i; + struct drm_gem_vram_object *gbo; + + for (i = 0; i < ARRAY_SIZE(ast->cursor.gbo); ++i) { + gbo = ast->cursor.gbo[i]; + drm_gem_vram_unpin(gbo); + drm_gem_vram_put(gbo); + } } int ast_mode_init(struct drm_device *dev) @@ -1076,23 +1067,6 @@ static void ast_i2c_destroy(struct ast_i2c_chan *i2c) kfree(i2c); } -static void ast_show_cursor(struct drm_crtc *crtc) -{ - struct ast_private *ast = crtc->dev->dev_private; - u8 jreg; - - jreg = 0x2; - /* enable ARGB cursor */ - jreg |= 1; - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg); -} - -static void ast_hide_cursor(struct drm_crtc *crtc) -{ - struct ast_private *ast = crtc->dev->dev_private; - ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00); -} - static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height) { union { @@ -1149,21 +1123,99 @@ static u32 copy_cursor_image(u8 *src, u8 *dst, int width, int height) return csum; } +static int ast_cursor_update(void *dst, void *src, unsigned int width, + unsigned int height) +{ + u32 csum; + + /* do data transfer to cursor cache */ + csum = copy_cursor_image(src, dst, width, height); + + /* write checksum + signature */ + dst += AST_HWC_SIZE; + writel(csum, dst); + writel(width, dst + AST_HWC_SIGNATURE_SizeX); + writel(height, dst + AST_HWC_SIGNATURE_SizeY); + writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX); + writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY); + + return 0; 
+} + +static void ast_cursor_set_base(struct ast_private *ast, u64 address) +{ + u8 addr0 = (address >> 3) & 0xff; + u8 addr1 = (address >> 11) & 0xff; + u8 addr2 = (address >> 19) & 0xff; + + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, addr0); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, addr1); + ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, addr2); +} + +static int ast_show_cursor(struct drm_crtc *crtc, void *src, + unsigned int width, unsigned int height) +{ + struct ast_private *ast = crtc->dev->dev_private; + struct ast_crtc *ast_crtc = to_ast_crtc(crtc); + struct drm_gem_vram_object *gbo; + void *dst; + s64 off; + int ret; + u8 jreg; + + gbo = ast->cursor.gbo[ast->cursor.next_index]; + dst = drm_gem_vram_vmap(gbo); + if (IS_ERR(dst)) + return PTR_ERR(dst); + off = drm_gem_vram_offset(gbo); + if (off < 0) { + ret = (int)off; + goto err_drm_gem_vram_vunmap; + } + + ret = ast_cursor_update(dst, src, width, height); + if (ret) + goto err_drm_gem_vram_vunmap; + ast_cursor_set_base(ast, off); + + ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width; + ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height; + + jreg = 0x2; + /* enable ARGB cursor */ + jreg |= 1; + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg); + + ++ast->cursor.next_index; + ast->cursor.next_index %= ARRAY_SIZE(ast->cursor.gbo); + + drm_gem_vram_vunmap(gbo, dst); + + return 0; + +err_drm_gem_vram_vunmap: + drm_gem_vram_vunmap(gbo, dst); + return ret; +} + +static void ast_hide_cursor(struct drm_crtc *crtc) +{ + struct ast_private *ast = crtc->dev->dev_private; + + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, 0x00); +} + static int ast_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height) { - struct ast_private *ast = crtc->dev->dev_private; - struct ast_crtc *ast_crtc = to_ast_crtc(crtc); struct drm_gem_object *obj; struct drm_gem_vram_object *gbo; - s64 dst_gpu; - u64 gpu_addr; - u32 csum; + u8 *src; int ret; - u8 *src, *dst; if (!handle) { ast_hide_cursor(crtc); @@ -1179,70 +1231,23 @@ static int ast_cursor_set(struct drm_crtc *crtc, return -ENOENT; } gbo = drm_gem_vram_of_gem(obj); - - ret = drm_gem_vram_pin(gbo, 0); - if (ret) - goto err_drm_gem_object_put_unlocked; - src = drm_gem_vram_kmap(gbo, true, NULL); + src = drm_gem_vram_vmap(gbo); if (IS_ERR(src)) { ret = PTR_ERR(src); - goto err_drm_gem_vram_unpin; - } - - dst = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache), - false, NULL); - if (IS_ERR(dst)) { - ret = PTR_ERR(dst); - goto err_drm_gem_vram_kunmap; - } - dst_gpu = drm_gem_vram_offset(drm_gem_vram_of_gem(ast->cursor_cache)); - if (dst_gpu < 0) { - ret = (int)dst_gpu; - goto err_drm_gem_vram_kunmap; - } - - dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor; - - /* do data transfer to cursor cache */ - csum = copy_cursor_image(src, dst, width, height); - - /* write checksum + signature */ - { - struct drm_gem_vram_object *dst_gbo = - drm_gem_vram_of_gem(ast->cursor_cache); - u8 *dst = drm_gem_vram_kmap(dst_gbo, false, NULL); - dst += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE; - writel(csum, dst); - writel(width, dst + AST_HWC_SIGNATURE_SizeX); - writel(height, dst + AST_HWC_SIGNATURE_SizeY); - writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTX); - writel(0, dst + AST_HWC_SIGNATURE_HOTSPOTY); - - /* set pattern offset */ - gpu_addr = (u64)dst_gpu; - gpu_addr += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor; - gpu_addr >>= 3; - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc8, 
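Editor's note: ast_cursor_set_base() packs the VRAM offset of the active cursor buffer into three 8-bit CRTC index registers; bits [2:0] are dropped, so the base must be at least 8-byte aligned (the page-aligned, top-down pinned buffers allocated above easily satisfy that). A worked example with an illustrative offset near the top of an 8 MiB aperture:

	/* offset 0x7F0000:
	 *   0xc8 <- (0x7F0000 >>  3) & 0xff = 0x00
	 *   0xc9 <- (0x7F0000 >> 11) & 0xff = 0xE0
	 *   0xca <- (0x7F0000 >> 19) & 0xff = 0x0F
	 */
	ast_cursor_set_base(ast, 0x7F0000);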
gpu_addr & 0xff); - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc9, (gpu_addr >> 8) & 0xff); - ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xca, (gpu_addr >> 16) & 0xff); + goto err_drm_gem_object_put_unlocked; } - ast_crtc->offset_x = AST_MAX_HWC_WIDTH - width; - ast_crtc->offset_y = AST_MAX_HWC_WIDTH - height; - - ast->next_cursor = (ast->next_cursor + 1) % AST_DEFAULT_HWC_NUM; - ast_show_cursor(crtc); + ret = ast_show_cursor(crtc, src, width, height); + if (ret) + goto err_drm_gem_vram_vunmap; - drm_gem_vram_kunmap(gbo); - drm_gem_vram_unpin(gbo); + drm_gem_vram_vunmap(gbo, src); drm_gem_object_put_unlocked(obj); return 0; -err_drm_gem_vram_kunmap: - drm_gem_vram_kunmap(gbo); -err_drm_gem_vram_unpin: - drm_gem_vram_unpin(gbo); +err_drm_gem_vram_vunmap: + drm_gem_vram_vunmap(gbo, src); err_drm_gem_object_put_unlocked: drm_gem_object_put_unlocked(obj); return ret; @@ -1253,12 +1258,17 @@ static int ast_cursor_move(struct drm_crtc *crtc, { struct ast_crtc *ast_crtc = to_ast_crtc(crtc); struct ast_private *ast = crtc->dev->dev_private; + struct drm_gem_vram_object *gbo; int x_offset, y_offset; - u8 *sig; + u8 *dst, *sig; + u8 jreg; - sig = drm_gem_vram_kmap(drm_gem_vram_of_gem(ast->cursor_cache), - false, NULL); - sig += (AST_HWC_SIZE + AST_HWC_SIGNATURE_SIZE)*ast->next_cursor + AST_HWC_SIZE; + gbo = ast->cursor.gbo[ast->cursor.next_index]; + dst = drm_gem_vram_vmap(gbo); + if (IS_ERR(dst)) + return PTR_ERR(dst); + + sig = dst + AST_HWC_SIZE; writel(x, sig + AST_HWC_SIGNATURE_X); writel(y, sig + AST_HWC_SIGNATURE_Y); @@ -1281,7 +1291,11 @@ static int ast_cursor_move(struct drm_crtc *crtc, ast_set_index_reg(ast, AST_IO_CRTC_PORT, 0xc7, ((y >> 8) & 0x07)); /* dummy write to fire HWC */ - ast_show_cursor(crtc); + jreg = 0x02 | + 0x01; /* enable ARGB4444 cursor */ + ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xcb, 0xfc, jreg); + + drm_gem_vram_vunmap(gbo, dst); return 0; } diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index c52d92294171..fad34106083a 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -30,7 +30,6 @@ #include <drm/drm_print.h> #include <drm/drm_gem_vram_helper.h> -#include <drm/drm_vram_mm_helper.h> #include "ast_drv.h" @@ -42,7 +41,7 @@ int ast_mm_init(struct ast_private *ast) vmm = drm_vram_helper_alloc_mm( dev, pci_resource_start(dev->pdev, 0), - ast->vram_size, &drm_gem_vram_mm_funcs); + ast->vram_size); if (IS_ERR(vmm)) { ret = PTR_ERR(vmm); DRM_ERROR("Error initializing VRAM MM; %d\n", ret); diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 375fa84c548b..121b62682d80 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -107,7 +107,8 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint) output->encoder.possible_crtcs = 0x1; if (panel) { - bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_Unknown); + bridge = drm_panel_bridge_add_typed(panel, + DRM_MODE_CONNECTOR_Unknown); if (IS_ERR(bridge)) return PTR_ERR(bridge); } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index 89f5a756fa37..034f202dfe8f 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -601,7 +601,6 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, struct drm_framebuffer *fb = state->base.fb; const struct drm_display_mode *mode; struct 
drm_crtc_state *crtc_state; - unsigned int tmp; int ret; int i; @@ -694,9 +693,7 @@ static int atmel_hlcdc_plane_atomic_check(struct drm_plane *p, * Swap width and size in case of 90 or 270 degrees rotation */ if (drm_rotation_90_or_270(state->base.rotation)) { - tmp = state->src_w; - state->src_w = state->src_h; - state->src_h = tmp; + swap(state->src_w, state->src_h); } if (!desc->layout.size && diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig index 32b043abb668..7bcdf294fed8 100644 --- a/drivers/gpu/drm/bochs/Kconfig +++ b/drivers/gpu/drm/bochs/Kconfig @@ -4,6 +4,8 @@ config DRM_BOCHS depends on DRM && PCI && MMU select DRM_KMS_HELPER select DRM_VRAM_HELPER + select DRM_TTM + select DRM_TTM_HELPER help Choose this option for qemu. If M is selected the module will be called bochs-drm. diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h index 68483a2fc12c..917767173ee6 100644 --- a/drivers/gpu/drm/bochs/bochs.h +++ b/drivers/gpu/drm/bochs/bochs.h @@ -10,7 +10,6 @@ #include <drm/drm_gem.h> #include <drm/drm_gem_vram_helper.h> #include <drm/drm_simple_kms_helper.h> -#include <drm/drm_vram_mm_helper.h> /* ---------------------------------------------------------------------- */ diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c index 770e1625d05e..10460878414e 100644 --- a/drivers/gpu/drm/bochs/bochs_drv.c +++ b/drivers/gpu/drm/bochs/bochs_drv.c @@ -58,10 +58,7 @@ err: return ret; } -static const struct file_operations bochs_fops = { - .owner = THIS_MODULE, - DRM_VRAM_MM_FILE_OPERATIONS -}; +DEFINE_DRM_GEM_FOPS(bochs_fops); static struct drm_driver bochs_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, @@ -114,7 +111,7 @@ static int bochs_pci_probe(struct pci_dev *pdev, return -ENOMEM; } - ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "bochsdrmfb"); + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "bochsdrmfb"); if (ret) return ret; diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c index 02a9c1ed165b..3f0006c2470d 100644 --- a/drivers/gpu/drm/bochs/bochs_kms.c +++ b/drivers/gpu/drm/bochs/bochs_kms.c @@ -69,33 +69,11 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe, } } -static int bochs_pipe_prepare_fb(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *new_state) -{ - struct drm_gem_vram_object *gbo; - - if (!new_state->fb) - return 0; - gbo = drm_gem_vram_of_gem(new_state->fb->obj[0]); - return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); -} - -static void bochs_pipe_cleanup_fb(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state) -{ - struct drm_gem_vram_object *gbo; - - if (!old_state->fb) - return; - gbo = drm_gem_vram_of_gem(old_state->fb->obj[0]); - drm_gem_vram_unpin(gbo); -} - static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = { .enable = bochs_pipe_enable, .update = bochs_pipe_update, - .prepare_fb = bochs_pipe_prepare_fb, - .cleanup_fb = bochs_pipe_cleanup_fb, + .prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb, + .cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb, }; static int bochs_connector_get_modes(struct drm_connector *connector) diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 8f9bb886f7ad..1b74f530b07c 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -11,8 +11,7 @@ int bochs_mm_init(struct bochs_device *bochs) struct drm_vram_mm *vmm; 
vmm = drm_vram_helper_alloc_mm(bochs->dev, bochs->fb_base, - bochs->fb_size, - &drm_gem_vram_mm_funcs); + bochs->fb_size); return PTR_ERR_OR_ZERO(vmm); } diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 1cc9f502c1f2..34362976cd6f 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -87,8 +87,7 @@ config DRM_SIL_SII8620 depends on OF select DRM_KMS_HELPER imply EXTCON - select INPUT - select RC_CORE + depends on RC_CORE || !RC_CORE help Silicon Image SII8620 HDMI/MHL bridge chip driver. diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix-anx78xx.c index 3c7cc5af735c..274989f96a91 100644 --- a/drivers/gpu/drm/bridge/analogix-anx78xx.c +++ b/drivers/gpu/drm/bridge/analogix-anx78xx.c @@ -19,6 +19,7 @@ #include <linux/types.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_dp_helper.h> #include <drm/drm_edid.h> @@ -38,12 +39,20 @@ #define AUX_CH_BUFFER_SIZE 16 #define AUX_WAIT_TIMEOUT_MS 15 -static const u8 anx78xx_i2c_addresses[] = { - [I2C_IDX_TX_P0] = TX_P0, - [I2C_IDX_TX_P1] = TX_P1, - [I2C_IDX_TX_P2] = TX_P2, - [I2C_IDX_RX_P0] = RX_P0, - [I2C_IDX_RX_P1] = RX_P1, +static const u8 anx7808_i2c_addresses[] = { + [I2C_IDX_TX_P0] = 0x78, + [I2C_IDX_TX_P1] = 0x7a, + [I2C_IDX_TX_P2] = 0x72, + [I2C_IDX_RX_P0] = 0x7e, + [I2C_IDX_RX_P1] = 0x80, +}; + +static const u8 anx781x_i2c_addresses[] = { + [I2C_IDX_TX_P0] = 0x70, + [I2C_IDX_TX_P1] = 0x7a, + [I2C_IDX_TX_P2] = 0x72, + [I2C_IDX_RX_P0] = 0x7e, + [I2C_IDX_RX_P1] = 0x80, }; struct anx78xx_platform_data { @@ -62,7 +71,6 @@ struct anx78xx { struct i2c_client *client; struct edid *edid; struct drm_connector connector; - struct drm_dp_link link; struct anx78xx_platform_data pdata; struct mutex lock; @@ -715,7 +723,9 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx) /* 1.0V digital core power regulator */ pdata->dvdd10 = devm_regulator_get(dev, "dvdd10"); if (IS_ERR(pdata->dvdd10)) { - DRM_ERROR("DVDD10 regulator not found\n"); + if (PTR_ERR(pdata->dvdd10) != -EPROBE_DEFER) + DRM_ERROR("DVDD10 regulator not found\n"); + return PTR_ERR(pdata->dvdd10); } @@ -737,7 +747,7 @@ static int anx78xx_init_pdata(struct anx78xx *anx78xx) static int anx78xx_dp_link_training(struct anx78xx *anx78xx) { - u8 dp_bw, value; + u8 dp_bw, dpcd[2]; int err; err = regmap_write(anx78xx->map[I2C_IDX_RX_P0], SP_HDMI_MUTE_CTRL_REG, @@ -790,18 +800,34 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx) if (err) return err; - /* Check link capabilities */ - err = drm_dp_link_probe(&anx78xx->aux, &anx78xx->link); - if (err < 0) { - DRM_ERROR("Failed to probe link capabilities: %d\n", err); - return err; - } + /* + * Power up the sink (DP_SET_POWER register is only available on DPCD + * v1.1 and later). 
+ */ + if (anx78xx->dpcd[DP_DPCD_REV] >= 0x11) { + err = drm_dp_dpcd_readb(&anx78xx->aux, DP_SET_POWER, &dpcd[0]); + if (err < 0) { + DRM_ERROR("Failed to read DP_SET_POWER register: %d\n", + err); + return err; + } - /* Power up the sink */ - err = drm_dp_link_power_up(&anx78xx->aux, &anx78xx->link); - if (err < 0) { - DRM_ERROR("Failed to power up DisplayPort link: %d\n", err); - return err; + dpcd[0] &= ~DP_SET_POWER_MASK; + dpcd[0] |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(&anx78xx->aux, DP_SET_POWER, dpcd[0]); + if (err < 0) { + DRM_ERROR("Failed to power up DisplayPort link: %d\n", + err); + return err; + } + + /* + * According to the DP 1.1 specification, a "Sink Device must + * exit the power saving state within 1 ms" (Section 2.5.3.1, + * Table 5-52, "Sink Control Field" (register 0x600). + */ + usleep_range(1000, 2000); } /* Possibly enable downspread on the sink */ @@ -840,15 +866,22 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx) if (err) return err; - value = drm_dp_link_rate_to_bw_code(anx78xx->link.rate); + dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd); + dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]); err = regmap_write(anx78xx->map[I2C_IDX_TX_P0], - SP_DP_MAIN_LINK_BW_SET_REG, value); + SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]); if (err) return err; - err = drm_dp_link_configure(&anx78xx->aux, &anx78xx->link); + dpcd[1] = drm_dp_max_lane_count(anx78xx->dpcd); + + if (drm_dp_enhanced_frame_cap(anx78xx->dpcd)) + dpcd[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(&anx78xx->aux, DP_LINK_BW_SET, dpcd, + sizeof(dpcd)); if (err < 0) { - DRM_ERROR("Failed to configure DisplayPort link: %d\n", err); + DRM_ERROR("Failed to configure link: %d\n", err); return err; } @@ -1301,6 +1334,7 @@ static const struct regmap_config anx78xx_regmap_config = { }; static const u16 anx78xx_chipid_list[] = { + 0x7808, 0x7812, 0x7814, 0x7818, @@ -1312,6 +1346,7 @@ static int anx78xx_i2c_probe(struct i2c_client *client, struct anx78xx *anx78xx; struct anx78xx_platform_data *pdata; unsigned int i, idl, idh, version; + const u8 *i2c_addresses; bool found = false; int err; @@ -1332,7 +1367,9 @@ static int anx78xx_i2c_probe(struct i2c_client *client, err = anx78xx_init_pdata(anx78xx); if (err) { - DRM_ERROR("Failed to initialize pdata: %d\n", err); + if (err != -EPROBE_DEFER) + DRM_ERROR("Failed to initialize pdata: %d\n", err); + return err; } @@ -1349,22 +1386,26 @@ static int anx78xx_i2c_probe(struct i2c_client *client, } /* Map slave addresses of ANX7814 */ + i2c_addresses = device_get_match_data(&client->dev); for (i = 0; i < I2C_NUM_ADDRESSES; i++) { - anx78xx->i2c_dummy[i] = i2c_new_dummy(client->adapter, - anx78xx_i2c_addresses[i] >> 1); - if (!anx78xx->i2c_dummy[i]) { - err = -ENOMEM; - DRM_ERROR("Failed to reserve I2C bus %02x\n", - anx78xx_i2c_addresses[i]); + struct i2c_client *i2c_dummy; + + i2c_dummy = i2c_new_dummy_device(client->adapter, + i2c_addresses[i] >> 1); + if (IS_ERR(i2c_dummy)) { + err = PTR_ERR(i2c_dummy); + DRM_ERROR("Failed to reserve I2C bus %02x: %d\n", + i2c_addresses[i], err); goto err_unregister_i2c; } + anx78xx->i2c_dummy[i] = i2c_dummy; anx78xx->map[i] = devm_regmap_init_i2c(anx78xx->i2c_dummy[i], &anx78xx_regmap_config); if (IS_ERR(anx78xx->map[i])) { err = PTR_ERR(anx78xx->map[i]); DRM_ERROR("Failed regmap initialization %02x\n", - anx78xx_i2c_addresses[i]); + i2c_addresses[i]); goto err_unregister_i2c; } } @@ -1463,7 +1504,10 @@ MODULE_DEVICE_TABLE(i2c, anx78xx_id); #if IS_ENABLED(CONFIG_OF) static const struct of_device_id 
anx78xx_match_table[] = { - { .compatible = "analogix,anx7814", }, + { .compatible = "analogix,anx7808", .data = anx7808_i2c_addresses }, + { .compatible = "analogix,anx7812", .data = anx781x_i2c_addresses }, + { .compatible = "analogix,anx7814", .data = anx781x_i2c_addresses }, + { .compatible = "analogix,anx7818", .data = anx781x_i2c_addresses }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, anx78xx_match_table); diff --git a/drivers/gpu/drm/bridge/analogix-anx78xx.h b/drivers/gpu/drm/bridge/analogix-anx78xx.h index 25e063bcecbc..55d6c2109740 100644 --- a/drivers/gpu/drm/bridge/analogix-anx78xx.h +++ b/drivers/gpu/drm/bridge/analogix-anx78xx.h @@ -6,15 +6,8 @@ #ifndef __ANX78xx_H #define __ANX78xx_H -#define TX_P0 0x70 -#define TX_P1 0x7a -#define TX_P2 0x72 - -#define RX_P0 0x7e -#define RX_P1 0x80 - /***************************************************************/ -/* Register definition of device address 0x7e */ +/* Register definitions for RX_PO */ /***************************************************************/ /* @@ -171,7 +164,7 @@ #define SP_VSI_RCVD BIT(1) /***************************************************************/ -/* Register definition of device address 0x80 */ +/* Register definitions for RX_P1 */ /***************************************************************/ /* HDCP BCAPS Shadow Register */ @@ -217,7 +210,7 @@ #define SP_SET_AVMUTE BIT(0) /***************************************************************/ -/* Register definition of device address 0x70 */ +/* Register definitions for TX_P0 */ /***************************************************************/ /* HDCP Status Register */ @@ -451,7 +444,7 @@ #define SP_DP_BUF_DATA0_REG 0xf0 /***************************************************************/ -/* Register definition of device address 0x72 */ +/* Register definitions for TX_P2 */ /***************************************************************/ /* @@ -674,7 +667,7 @@ #define SP_INT_CTRL_REG 0xff /***************************************************************/ -/* Register definition of device address 0x7a */ +/* Register definitions for TX_P1 */ /***************************************************************/ /* DP TX Link Training Control Register */ diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index 22885dceaa17..bb411fe52ae8 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -21,6 +21,7 @@ #include <drm/bridge/analogix_dp.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_device.h> #include <drm/drm_panel.h> diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c index 6166dca6be81..3a5bd4e7fd1e 100644 --- a/drivers/gpu/drm/bridge/cdns-dsi.c +++ b/drivers/gpu/drm/bridge/cdns-dsi.c @@ -956,7 +956,8 @@ static int cdns_dsi_attach(struct mipi_dsi_host *host, panel = of_drm_find_panel(np); if (!IS_ERR(panel)) { - bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI); + bridge = drm_panel_bridge_add_typed(panel, + DRM_MODE_CONNECTOR_DSI); } else { bridge = of_drm_find_bridge(dev->dev.of_node); if (!bridge) diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c index 7aa789c35882..cc33dc411b9e 100644 --- a/drivers/gpu/drm/bridge/dumb-vga-dac.c +++ b/drivers/gpu/drm/bridge/dumb-vga-dac.c @@ -12,6 +12,7 @@ #include <linux/regulator/consumer.h> #include 
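Editor's note: with the per-variant address tables attached to the match table's .data pointers, probe retrieves the right one through device_get_match_data(), which returns the .data of whichever of_device_id entry matched the device. The same pattern works for any per-compatible constant; a generic sketch (the vendor,foo compatibles and struct foo_cfg are placeholders):

	struct foo_cfg { u8 tx_addr; };	/* placeholder per-variant data */

	static const struct foo_cfg foo_a_cfg = { .tx_addr = 0x70 };
	static const struct foo_cfg foo_b_cfg = { .tx_addr = 0x78 };

	static const struct of_device_id foo_of_match[] = {
		{ .compatible = "vendor,foo-a", .data = &foo_a_cfg },
		{ .compatible = "vendor,foo-b", .data = &foo_b_cfg },
		{ /* sentinel */ }
	};

	/* in probe: */
	const struct foo_cfg *cfg = device_get_match_data(&client->dev);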
<drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> diff --git a/drivers/gpu/drm/bridge/lvds-encoder.c b/drivers/gpu/drm/bridge/lvds-encoder.c index 2ab2c234f26c..e2132a8d5106 100644 --- a/drivers/gpu/drm/bridge/lvds-encoder.c +++ b/drivers/gpu/drm/bridge/lvds-encoder.c @@ -106,7 +106,8 @@ static int lvds_encoder_probe(struct platform_device *pdev) } lvds_encoder->panel_bridge = - devm_drm_panel_bridge_add(dev, panel, DRM_MODE_CONNECTOR_LVDS); + devm_drm_panel_bridge_add_typed(dev, panel, + DRM_MODE_CONNECTOR_LVDS); if (IS_ERR(lvds_encoder->panel_bridge)) return PTR_ERR(lvds_encoder->panel_bridge); diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c index 6e81e5db57f2..e8a49f6146c6 100644 --- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c +++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c @@ -25,6 +25,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c index d4a1cc5052c3..57ff01339559 100644 --- a/drivers/gpu/drm/bridge/nxp-ptn3460.c +++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/of.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_of.h> diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index b12ae3a4c5f1..f4e293e7cf64 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -5,6 +5,7 @@ */ #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_connector.h> #include <drm/drm_encoder.h> #include <drm/drm_modeset_helper_vtables.h> @@ -133,8 +134,6 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = { * just calls the appropriate functions from &drm_panel. * * @panel: The drm_panel being wrapped. Must be non-NULL. - * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be - * created. * * For drivers converting from directly using drm_panel: The expected * usage pattern is that during either encoder module probe or DSI @@ -148,11 +147,37 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = { * drm_mode_config_cleanup() if the bridge has already been attached), then * drm_panel_bridge_remove() to free it. * + * The connector type is set to @panel->connector_type, which must be set to a + * known type. Calling this function with a panel whose connector type is + * DRM_MODE_CONNECTOR_Unknown will return NULL. + * * See devm_drm_panel_bridge_add() for an automatically manged version of this * function. */ -struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel, - u32 connector_type) +struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel) +{ + if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown)) + return NULL; + + return drm_panel_bridge_add_typed(panel, panel->connector_type); +} +EXPORT_SYMBOL(drm_panel_bridge_add); + +/** + * drm_panel_bridge_add_typed - Creates a &drm_bridge and &drm_connector with + * an explicit connector type. + * @panel: The drm_panel being wrapped. Must be non-NULL. 
+ * @connector_type: The connector type (DRM_MODE_CONNECTOR_*) + * + * This is just like drm_panel_bridge_add(), but forces the connector type to + * @connector_type instead of infering it from the panel. + * + * This function is deprecated and should not be used in new drivers. Use + * drm_panel_bridge_add() instead, and fix panel drivers as necessary if they + * don't report a connector type. + */ +struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel, + u32 connector_type) { struct panel_bridge *panel_bridge; @@ -176,7 +201,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel, return &panel_bridge->bridge; } -EXPORT_SYMBOL(drm_panel_bridge_add); +EXPORT_SYMBOL(drm_panel_bridge_add_typed); /** * drm_panel_bridge_remove - Unregisters and frees a drm_bridge @@ -213,15 +238,38 @@ static void devm_drm_panel_bridge_release(struct device *dev, void *res) * that just calls the appropriate functions from &drm_panel. * @dev: device to tie the bridge lifetime to * @panel: The drm_panel being wrapped. Must be non-NULL. - * @connector_type: The DRM_MODE_CONNECTOR_* for the connector to be - * created. * * This is the managed version of drm_panel_bridge_add() which automatically * calls drm_panel_bridge_remove() when @dev is unbound. */ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev, - struct drm_panel *panel, - u32 connector_type) + struct drm_panel *panel) +{ + if (WARN_ON(panel->connector_type == DRM_MODE_CONNECTOR_Unknown)) + return NULL; + + return devm_drm_panel_bridge_add_typed(dev, panel, + panel->connector_type); +} +EXPORT_SYMBOL(devm_drm_panel_bridge_add); + +/** + * devm_drm_panel_bridge_add_typed - Creates a managed &drm_bridge and + * &drm_connector with an explicit connector type. + * @dev: device to tie the bridge lifetime to + * @panel: The drm_panel being wrapped. Must be non-NULL. + * @connector_type: The connector type (DRM_MODE_CONNECTOR_*) + * + * This is just like devm_drm_panel_bridge_add(), but forces the connector type + * to @connector_type instead of infering it from the panel. + * + * This function is deprecated and should not be used in new drivers. Use + * devm_drm_panel_bridge_add() instead, and fix panel drivers as necessary if + * they don't report a connector type. 
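The panel-bridge hunks above make the helpers take the connector type from the wrapped panel itself and keep the *_typed variants only for legacy callers. As an illustration only, a converted encoder driver could use the managed helper roughly as sketched below; the foo_attach_panel_bridge() wrapper and its device-tree lookup are hypothetical, while devm_drm_panel_bridge_add(), of_drm_find_panel() and the DRM_MODE_CONNECTOR_Unknown behaviour come from this patch and the existing DRM core (assumes <drm/drm_bridge.h>, <drm/drm_of.h>, <drm/drm_panel.h> and <linux/err.h>).

static struct drm_bridge *foo_attach_panel_bridge(struct device *dev,
						  struct device_node *panel_np)
{
	struct drm_panel *panel;

	panel = of_drm_find_panel(panel_np);
	if (IS_ERR(panel))
		return ERR_CAST(panel);

	/*
	 * The connector type now comes from panel->connector_type; a panel
	 * that still reports DRM_MODE_CONNECTOR_Unknown hits the WARN_ON in
	 * the un-typed helper and must keep using the _typed variant until
	 * the panel driver is fixed.
	 */
	return devm_drm_panel_bridge_add(dev, panel);
}

Callers that still pass an explicit type, such as the cdns-dsi, lvds-encoder and dw-mipi-dsi conversions in this series, simply switch to the _typed spelling with unchanged semantics.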
+ */ +struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev, + struct drm_panel *panel, + u32 connector_type) { struct drm_bridge **ptr, *bridge; @@ -230,7 +278,7 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev, if (!ptr) return ERR_PTR(-ENOMEM); - bridge = drm_panel_bridge_add(panel, connector_type); + bridge = drm_panel_bridge_add_typed(panel, connector_type); if (!IS_ERR(bridge)) { *ptr = bridge; devres_add(dev, ptr); @@ -240,4 +288,4 @@ struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev, return bridge; } -EXPORT_SYMBOL(devm_drm_panel_bridge_add); +EXPORT_SYMBOL(devm_drm_panel_bridge_add_typed); diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c index 93c68e2e9484..b7a72dfdcac3 100644 --- a/drivers/gpu/drm/bridge/parade-ps8622.c +++ b/drivers/gpu/drm/bridge/parade-ps8622.c @@ -17,6 +17,7 @@ #include <linux/regulator/consumer.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c index 38f75ac580df..b70e8c5cf2e1 100644 --- a/drivers/gpu/drm/bridge/sii902x.c +++ b/drivers/gpu/drm/bridge/sii902x.c @@ -20,6 +20,7 @@ #include <linux/clk.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> #include <drm/drm_print.h> diff --git a/drivers/gpu/drm/bridge/sii9234.c b/drivers/gpu/drm/bridge/sii9234.c index 25d4ad8c7ad6..f81f81b7051f 100644 --- a/drivers/gpu/drm/bridge/sii9234.c +++ b/drivers/gpu/drm/bridge/sii9234.c @@ -13,6 +13,7 @@ * Dharam Kumar <dharam.kr@samsung.com> */ #include <drm/bridge/mhl.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> @@ -841,39 +842,28 @@ static int sii9234_init_resources(struct sii9234 *ctx, ctx->client[I2C_MHL] = client; - ctx->client[I2C_TPI] = i2c_new_dummy(adapter, I2C_TPI_ADDR); - if (!ctx->client[I2C_TPI]) { + ctx->client[I2C_TPI] = devm_i2c_new_dummy_device(&client->dev, adapter, + I2C_TPI_ADDR); + if (IS_ERR(ctx->client[I2C_TPI])) { dev_err(ctx->dev, "failed to create TPI client\n"); - return -ENODEV; + return PTR_ERR(ctx->client[I2C_TPI]); } - ctx->client[I2C_HDMI] = i2c_new_dummy(adapter, I2C_HDMI_ADDR); - if (!ctx->client[I2C_HDMI]) { + ctx->client[I2C_HDMI] = devm_i2c_new_dummy_device(&client->dev, adapter, + I2C_HDMI_ADDR); + if (IS_ERR(ctx->client[I2C_HDMI])) { dev_err(ctx->dev, "failed to create HDMI RX client\n"); - goto fail_tpi; + return PTR_ERR(ctx->client[I2C_HDMI]); } - ctx->client[I2C_CBUS] = i2c_new_dummy(adapter, I2C_CBUS_ADDR); - if (!ctx->client[I2C_CBUS]) { + ctx->client[I2C_CBUS] = devm_i2c_new_dummy_device(&client->dev, adapter, + I2C_CBUS_ADDR); + if (IS_ERR(ctx->client[I2C_CBUS])) { dev_err(ctx->dev, "failed to create CBUS client\n"); - goto fail_hdmi; + return PTR_ERR(ctx->client[I2C_CBUS]); } return 0; - -fail_hdmi: - i2c_unregister_device(ctx->client[I2C_HDMI]); -fail_tpi: - i2c_unregister_device(ctx->client[I2C_TPI]); - - return -ENODEV; -} - -static void sii9234_deinit_resources(struct sii9234 *ctx) -{ - i2c_unregister_device(ctx->client[I2C_CBUS]); - i2c_unregister_device(ctx->client[I2C_HDMI]); - i2c_unregister_device(ctx->client[I2C_TPI]); } static inline struct sii9234 *bridge_to_sii9234(struct drm_bridge *bridge) @@ -950,7 +940,6 @@ static int sii9234_remove(struct i2c_client *client) sii9234_cable_out(ctx); drm_bridge_remove(&ctx->bridge); - sii9234_deinit_resources(ctx); 
return 0; } diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c index bd3165ee5354..4c0eef406eb1 100644 --- a/drivers/gpu/drm/bridge/sil-sii8620.c +++ b/drivers/gpu/drm/bridge/sil-sii8620.c @@ -9,6 +9,7 @@ #include <asm/unaligned.h> #include <drm/bridge/mhl.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_encoder.h> @@ -1759,10 +1760,8 @@ static bool sii8620_rcp_consume(struct sii8620 *ctx, u8 scancode) scancode &= MHL_RCP_KEY_ID_MASK; - if (!ctx->rc_dev) { - dev_dbg(ctx->dev, "RCP input device not initialized\n"); + if (!IS_ENABLED(CONFIG_RC_CORE) || !ctx->rc_dev) return false; - } if (pressed) rc_keydown(ctx->rc_dev, RC_PROTO_CEC, scancode, 0); @@ -2099,6 +2098,9 @@ static void sii8620_init_rcp_input_dev(struct sii8620 *ctx) struct rc_dev *rc_dev; int ret; + if (!IS_ENABLED(CONFIG_RC_CORE)) + return; + rc_dev = rc_allocate_device(RC_DRIVER_SCANCODE); if (!rc_dev) { dev_err(ctx->dev, "Failed to allocate RC device\n"); @@ -2213,6 +2215,9 @@ static void sii8620_detach(struct drm_bridge *bridge) { struct sii8620 *ctx = bridge_to_sii8620(bridge); + if (!IS_ENABLED(CONFIG_RC_CORE)) + return; + rc_unregister_device(ctx->rc_dev); } diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c index ac1e001d0882..70ab4fbdc23e 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-cec.c @@ -285,7 +285,7 @@ static int dw_hdmi_cec_probe(struct platform_device *pdev) ret = cec_register_adapter(cec->adap, pdev->dev.parent); if (ret < 0) { - cec_notifier_cec_adap_unregister(cec->notify); + cec_notifier_cec_adap_unregister(cec->notify, cec->adap); return ret; } @@ -302,7 +302,7 @@ static int dw_hdmi_cec_remove(struct platform_device *pdev) { struct dw_hdmi_cec *cec = platform_get_drvdata(pdev); - cec_notifier_cec_adap_unregister(cec->notify); + cec_notifier_cec_adap_unregister(cec->notify, cec->adap); cec_unregister_adapter(cec->adap); return 0; diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c index 6c2c44d0bdee..d7e65c869415 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi-i2s-audio.c @@ -102,6 +102,7 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data, } dw_hdmi_set_sample_rate(hdmi, hparms->sample_rate); + dw_hdmi_set_channel_status(hdmi, hparms->iec.status); dw_hdmi_set_channel_count(hdmi, hparms->channels); dw_hdmi_set_channel_allocation(hdmi, hparms->cea.channel_allocation); @@ -109,6 +110,14 @@ static int dw_hdmi_i2s_hw_params(struct device *dev, void *data, hdmi_write(audio, conf0, HDMI_AUD_CONF0); hdmi_write(audio, conf1, HDMI_AUD_CONF1); + return 0; +} + +static int dw_hdmi_i2s_audio_startup(struct device *dev, void *data) +{ + struct dw_hdmi_i2s_audio_data *audio = data; + struct dw_hdmi *hdmi = audio->hdmi; + dw_hdmi_audio_enable(hdmi); return 0; @@ -163,6 +172,7 @@ static int dw_hdmi_i2s_hook_plugged_cb(struct device *dev, void *data, static struct hdmi_codec_ops dw_hdmi_i2s_ops = { .hw_params = dw_hdmi_i2s_hw_params, + .audio_startup = dw_hdmi_i2s_audio_startup, .audio_shutdown = dw_hdmi_i2s_audio_shutdown, .get_eld = dw_hdmi_i2s_get_eld, .get_dai_id = dw_hdmi_i2s_get_dai_id, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c index 2102872bf43c..67fca439bbfb 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c +++ 
b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c @@ -25,7 +25,9 @@ #include <uapi/linux/videodev2.h> #include <drm/bridge/dw_hdmi.h> +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_of.h> #include <drm/drm_print.h> @@ -36,6 +38,7 @@ #include "dw-hdmi-cec.h" #include "dw-hdmi.h" +#define DDC_CI_ADDR 0x37 #define DDC_SEGMENT_ADDR 0x30 #define HDMI_EDID_LEN 512 @@ -424,6 +427,15 @@ static int dw_hdmi_i2c_xfer(struct i2c_adapter *adap, u8 addr = msgs[0].addr; int i, ret = 0; + if (addr == DDC_CI_ADDR) + /* + * The internal I2C controller does not support the multi-byte + * read and write operations needed for DDC/CI. + * TOFIX: Blacklist the DDC/CI address until we filter out + * unsupported I2C operations. + */ + return -EOPNOTSUPP; + dev_dbg(hdmi->dev, "xfer: num: %d, addr: %#x\n", num, addr); for (i = 0; i < num; i++) { @@ -606,6 +618,26 @@ static unsigned int hdmi_compute_n(unsigned int freq, unsigned long pixel_clk) return n; } +/* + * When transmitting IEC60958 linear PCM audio, these registers allow to + * configure the channel status information of all the channel status + * bits in the IEC60958 frame. For the moment this configuration is only + * used when the I2S audio interface, General Purpose Audio (GPA), + * or AHB audio DMA (AHBAUDDMA) interface is active + * (for S/PDIF interface this information comes from the stream). + */ +void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, + u8 *channel_status) +{ + /* + * Set channel status register for frequency and word length. + * Use default values for other registers. + */ + hdmi_writeb(hdmi, channel_status[3], HDMI_FC_AUDSCHNLS7); + hdmi_writeb(hdmi, channel_status[4], HDMI_FC_AUDSCHNLS8); +} +EXPORT_SYMBOL_GPL(dw_hdmi_set_channel_status); + static void hdmi_set_clk_regenerator(struct dw_hdmi *hdmi, unsigned long pixel_clk, unsigned int sample_rate) { @@ -1738,6 +1770,41 @@ static void hdmi_config_vendor_specific_infoframe(struct dw_hdmi *hdmi, HDMI_FC_DATAUTO0_VSD_MASK); } +static void hdmi_config_drm_infoframe(struct dw_hdmi *hdmi) +{ + const struct drm_connector_state *conn_state = hdmi->connector.state; + struct hdmi_drm_infoframe frame; + u8 buffer[30]; + ssize_t err; + int i; + + if (!hdmi->plat_data->use_drm_infoframe) + return; + + hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_DISABLE, + HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN); + + err = drm_hdmi_infoframe_set_hdr_metadata(&frame, conn_state); + if (err < 0) + return; + + err = hdmi_drm_infoframe_pack(&frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(hdmi->dev, "Failed to pack drm infoframe: %zd\n", err); + return; + } + + hdmi_writeb(hdmi, frame.version, HDMI_FC_DRM_HB0); + hdmi_writeb(hdmi, frame.length, HDMI_FC_DRM_HB1); + + for (i = 0; i < frame.length; i++) + hdmi_writeb(hdmi, buffer[4 + i], HDMI_FC_DRM_PB0 + i); + + hdmi_writeb(hdmi, 1, HDMI_FC_DRM_UP); + hdmi_modb(hdmi, HDMI_FC_PACKET_TX_EN_DRM_ENABLE, + HDMI_FC_PACKET_TX_EN_DRM_MASK, HDMI_FC_PACKET_TX_EN); +} + static void hdmi_av_composer(struct dw_hdmi *hdmi, const struct drm_display_mode *mode) { @@ -2049,7 +2116,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) /* HDMI Initialization Step E - Configure audio */ hdmi_clk_regenerator_update_pixel_clock(hdmi); - hdmi_enable_audio_clk(hdmi, true); + hdmi_enable_audio_clk(hdmi, hdmi->audio_enable); } /* not for DVI mode */ @@ -2059,6 +2126,7 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode) /* HDMI 
Initialization Step F - Configure AVI InfoFrame */ hdmi_config_AVI(hdmi, mode); hdmi_config_vendor_specific_infoframe(hdmi, mode); + hdmi_config_drm_infoframe(hdmi); } else { dev_dbg(hdmi->dev, "%s DVI mode\n", __func__); } @@ -2237,6 +2305,45 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector) return ret; } +static bool hdr_metadata_equal(const struct drm_connector_state *old_state, + const struct drm_connector_state *new_state) +{ + struct drm_property_blob *old_blob = old_state->hdr_output_metadata; + struct drm_property_blob *new_blob = new_state->hdr_output_metadata; + + if (!old_blob || !new_blob) + return old_blob == new_blob; + + if (old_blob->length != new_blob->length) + return false; + + return !memcmp(old_blob->data, new_blob->data, old_blob->length); +} + +static int dw_hdmi_connector_atomic_check(struct drm_connector *connector, + struct drm_atomic_state *state) +{ + struct drm_connector_state *old_state = + drm_atomic_get_old_connector_state(state, connector); + struct drm_connector_state *new_state = + drm_atomic_get_new_connector_state(state, connector); + struct drm_crtc *crtc = new_state->crtc; + struct drm_crtc_state *crtc_state; + + if (!crtc) + return 0; + + if (!hdr_metadata_equal(old_state, new_state)) { + crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + crtc_state->mode_changed = true; + } + + return 0; +} + static void dw_hdmi_connector_force(struct drm_connector *connector) { struct dw_hdmi *hdmi = container_of(connector, struct dw_hdmi, @@ -2261,6 +2368,7 @@ static const struct drm_connector_funcs dw_hdmi_connector_funcs = { static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs = { .get_modes = dw_hdmi_connector_get_modes, + .atomic_check = dw_hdmi_connector_atomic_check, }; static int dw_hdmi_bridge_attach(struct drm_bridge *bridge) @@ -2281,6 +2389,10 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge) DRM_MODE_CONNECTOR_HDMIA, hdmi->ddc); + if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe) + drm_object_attach_property(&connector->base, + connector->dev->mode_config.hdr_output_metadata_property, 0); + drm_connector_attach_encoder(connector, encoder); cec_fill_conn_info_from_drm(&conn_info, connector); diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h index 6988f12d89d9..1999db05bc3b 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h +++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.h @@ -158,6 +158,8 @@ #define HDMI_FC_SPDDEVICEINF 0x1062 #define HDMI_FC_AUDSCONF 0x1063 #define HDMI_FC_AUDSSTAT 0x1064 +#define HDMI_FC_AUDSCHNLS7 0x106e +#define HDMI_FC_AUDSCHNLS8 0x106f #define HDMI_FC_DATACH0FILL 0x1070 #define HDMI_FC_DATACH1FILL 0x1071 #define HDMI_FC_DATACH2FILL 0x1072 @@ -252,6 +254,7 @@ #define HDMI_FC_POL2 0x10DB #define HDMI_FC_PRCONF 0x10E0 #define HDMI_FC_SCRAMBLER_CTRL 0x10E1 +#define HDMI_FC_PACKET_TX_EN 0x10E3 #define HDMI_FC_GMD_STAT 0x1100 #define HDMI_FC_GMD_EN 0x1101 @@ -287,6 +290,37 @@ #define HDMI_FC_GMD_PB26 0x111F #define HDMI_FC_GMD_PB27 0x1120 +#define HDMI_FC_DRM_UP 0x1167 +#define HDMI_FC_DRM_HB0 0x1168 +#define HDMI_FC_DRM_HB1 0x1169 +#define HDMI_FC_DRM_PB0 0x116A +#define HDMI_FC_DRM_PB1 0x116B +#define HDMI_FC_DRM_PB2 0x116C +#define HDMI_FC_DRM_PB3 0x116D +#define HDMI_FC_DRM_PB4 0x116E +#define HDMI_FC_DRM_PB5 0x116F +#define HDMI_FC_DRM_PB6 0x1170 +#define HDMI_FC_DRM_PB7 0x1171 +#define HDMI_FC_DRM_PB8 0x1172 +#define HDMI_FC_DRM_PB9 0x1173 
+#define HDMI_FC_DRM_PB10 0x1174 +#define HDMI_FC_DRM_PB11 0x1175 +#define HDMI_FC_DRM_PB12 0x1176 +#define HDMI_FC_DRM_PB13 0x1177 +#define HDMI_FC_DRM_PB14 0x1178 +#define HDMI_FC_DRM_PB15 0x1179 +#define HDMI_FC_DRM_PB16 0x117A +#define HDMI_FC_DRM_PB17 0x117B +#define HDMI_FC_DRM_PB18 0x117C +#define HDMI_FC_DRM_PB19 0x117D +#define HDMI_FC_DRM_PB20 0x117E +#define HDMI_FC_DRM_PB21 0x117F +#define HDMI_FC_DRM_PB22 0x1180 +#define HDMI_FC_DRM_PB23 0x1181 +#define HDMI_FC_DRM_PB24 0x1182 +#define HDMI_FC_DRM_PB25 0x1183 +#define HDMI_FC_DRM_PB26 0x1184 + #define HDMI_FC_DBGFORCE 0x1200 #define HDMI_FC_DBGAUD0CH0 0x1201 #define HDMI_FC_DBGAUD1CH0 0x1202 @@ -742,6 +776,11 @@ enum { HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_MASK = 0x0F, HDMI_FC_PRCONF_OUTPUT_PR_FACTOR_OFFSET = 0, +/* FC_PACKET_TX_EN field values */ + HDMI_FC_PACKET_TX_EN_DRM_MASK = 0x80, + HDMI_FC_PACKET_TX_EN_DRM_ENABLE = 0x80, + HDMI_FC_PACKET_TX_EN_DRM_DISABLE = 0x00, + /* FC_AVICONF0-FC_AVICONF3 field values */ HDMI_FC_AVICONF0_PIX_FMT_MASK = 0x03, HDMI_FC_AVICONF0_PIX_FMT_RGB = 0x00, diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c index 675442bfc1bd..b6e793bb653c 100644 --- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c @@ -316,7 +316,8 @@ static int dw_mipi_dsi_host_attach(struct mipi_dsi_host *host, return ret; if (panel) { - bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DSI); + bridge = drm_panel_bridge_add_typed(panel, + DRM_MODE_CONNECTOR_DSI); if (IS_ERR(bridge)) return PTR_ERR(bridge); } @@ -981,7 +982,6 @@ __dw_mipi_dsi_probe(struct platform_device *pdev, struct device *dev = &pdev->dev; struct reset_control *apb_rst; struct dw_mipi_dsi *dsi; - struct resource *res; int ret; dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); @@ -997,11 +997,7 @@ __dw_mipi_dsi_probe(struct platform_device *pdev, } if (!plat_data->base) { - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return ERR_PTR(-ENODEV); - - dsi->base = devm_ioremap_resource(dev, res); + dsi->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(dsi->base)) return ERR_PTR(-ENODEV); diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c index 170f162ffa55..db298f550a5a 100644 --- a/drivers/gpu/drm/bridge/tc358764.c +++ b/drivers/gpu/drm/bridge/tc358764.c @@ -16,6 +16,7 @@ #include <video/mipi_display.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_fb_helper.h> #include <drm/drm_mipi_dsi.h> diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c index 8a8d605021f0..8029478ffebb 100644 --- a/drivers/gpu/drm/bridge/tc358767.c +++ b/drivers/gpu/drm/bridge/tc358767.c @@ -26,6 +26,7 @@ #include <linux/slab.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_dp_helper.h> #include <drm/drm_edid.h> #include <drm/drm_of.h> @@ -228,7 +229,9 @@ static bool tc_test_pattern; module_param_named(test, tc_test_pattern, bool, 0644); struct tc_edp_link { - struct drm_dp_link base; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + unsigned int rate; + u8 num_lanes; u8 assr; bool scrambler_dis; bool spread; @@ -437,9 +440,9 @@ static u32 tc_srcctrl(struct tc_data *tc) reg |= DP0_SRCCTRL_SCRMBLDIS; /* Scrambler Disabled */ if (tc->link.spread) reg |= DP0_SRCCTRL_SSCG; /* Spread Spectrum Enable */ - if (tc->link.base.num_lanes == 2) + if (tc->link.num_lanes == 2) reg |= DP0_SRCCTRL_LANES_2; /* Two Main Channel 
Lanes */ - if (tc->link.base.rate != 162000) + if (tc->link.rate != 162000) reg |= DP0_SRCCTRL_BW27; /* 2.7 Gbps link */ return reg; } @@ -662,23 +665,35 @@ err: static int tc_get_display_props(struct tc_data *tc) { + u8 revision, num_lanes; + unsigned int rate; int ret; u8 reg; /* Read DP Rx Link Capability */ - ret = drm_dp_link_probe(&tc->aux, &tc->link.base); + ret = drm_dp_dpcd_read(&tc->aux, DP_DPCD_REV, tc->link.dpcd, + DP_RECEIVER_CAP_SIZE); if (ret < 0) goto err_dpcd_read; - if (tc->link.base.rate != 162000 && tc->link.base.rate != 270000) { + + revision = tc->link.dpcd[DP_DPCD_REV]; + rate = drm_dp_max_link_rate(tc->link.dpcd); + num_lanes = drm_dp_max_lane_count(tc->link.dpcd); + + if (rate != 162000 && rate != 270000) { dev_dbg(tc->dev, "Falling to 2.7 Gbps rate\n"); - tc->link.base.rate = 270000; + rate = 270000; } - if (tc->link.base.num_lanes > 2) { + tc->link.rate = rate; + + if (num_lanes > 2) { dev_dbg(tc->dev, "Falling to 2 lanes\n"); - tc->link.base.num_lanes = 2; + num_lanes = 2; } + tc->link.num_lanes = num_lanes; + ret = drm_dp_dpcd_readb(&tc->aux, DP_MAX_DOWNSPREAD, ®); if (ret < 0) goto err_dpcd_read; @@ -696,11 +711,11 @@ static int tc_get_display_props(struct tc_data *tc) tc->link.assr = reg & DP_ALTERNATE_SCRAMBLER_RESET_ENABLE; dev_dbg(tc->dev, "DPCD rev: %d.%d, rate: %s, lanes: %d, framing: %s\n", - tc->link.base.revision >> 4, tc->link.base.revision & 0x0f, - (tc->link.base.rate == 162000) ? "1.62Gbps" : "2.7Gbps", - tc->link.base.num_lanes, - (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) ? - "enhanced" : "non-enhanced"); + revision >> 4, revision & 0x0f, + (tc->link.rate == 162000) ? "1.62Gbps" : "2.7Gbps", + tc->link.num_lanes, + drm_dp_enhanced_frame_cap(tc->link.dpcd) ? + "enhanced" : "default"); dev_dbg(tc->dev, "Downspread: %s, scrambler: %s\n", tc->link.spread ? "0.5%" : "0.0%", tc->link.scrambler_dis ? "disabled" : "enabled"); @@ -739,7 +754,7 @@ static int tc_set_video_mode(struct tc_data *tc, */ in_bw = mode->clock * bits_per_pixel / 8; - out_bw = tc->link.base.num_lanes * tc->link.base.rate; + out_bw = tc->link.num_lanes * tc->link.rate; max_tu_symbol = DIV_ROUND_UP(in_bw * TU_SIZE_RECOMMENDED, out_bw); dev_dbg(tc->dev, "set mode %dx%d\n", @@ -901,7 +916,7 @@ static int tc_main_link_enable(struct tc_data *tc) /* SSCG and BW27 on DP1 must be set to the same as on DP0 */ ret = regmap_write(tc->regmap, DP1_SRCCTRL, (tc->link.spread ? DP0_SRCCTRL_SSCG : 0) | - ((tc->link.base.rate != 162000) ? DP0_SRCCTRL_BW27 : 0)); + ((tc->link.rate != 162000) ? DP0_SRCCTRL_BW27 : 0)); if (ret) return ret; @@ -911,7 +926,7 @@ static int tc_main_link_enable(struct tc_data *tc) /* Setup Main Link */ dp_phy_ctrl = BGREN | PWR_SW_EN | PHY_A0_EN | PHY_M0_EN; - if (tc->link.base.num_lanes == 2) + if (tc->link.num_lanes == 2) dp_phy_ctrl |= PHY_2LANE; ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl); @@ -974,7 +989,13 @@ static int tc_main_link_enable(struct tc_data *tc) } /* Setup Link & DPRx Config for Training */ - ret = drm_dp_link_configure(aux, &tc->link.base); + tmp[0] = drm_dp_link_rate_to_bw_code(tc->link.rate); + tmp[1] = tc->link.num_lanes; + + if (drm_dp_enhanced_frame_cap(tc->link.dpcd)) + tmp[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + ret = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, tmp, 2); if (ret < 0) goto err_dpcd_write; @@ -1018,9 +1039,8 @@ static int tc_main_link_enable(struct tc_data *tc) /* Enable DP0 to start Link Training */ ret = regmap_write(tc->regmap, DP0CTL, - ((tc->link.base.capabilities & - DP_LINK_CAP_ENHANCED_FRAMING) ? 
EF_EN : 0) | - DP_EN); + (drm_dp_enhanced_frame_cap(tc->link.dpcd) ? + EF_EN : 0) | DP_EN); if (ret) return ret; @@ -1099,7 +1119,7 @@ static int tc_main_link_enable(struct tc_data *tc) ret = -ENODEV; } - if (tc->link.base.num_lanes == 2) { + if (tc->link.num_lanes == 2) { value = (tmp[0] >> 4) & DP_CHANNEL_EQ_BITS; if (value != DP_CHANNEL_EQ_BITS) { @@ -1170,7 +1190,7 @@ static int tc_stream_enable(struct tc_data *tc) return ret; value = VID_MN_GEN | DP_EN; - if (tc->link.base.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + if (drm_dp_enhanced_frame_cap(tc->link.dpcd)) value |= EF_EN; ret = regmap_write(tc->regmap, DP0CTL, value); if (ret) @@ -1296,7 +1316,7 @@ static enum drm_mode_status tc_mode_valid(struct drm_bridge *bridge, return MODE_CLOCK_HIGH; req = mode->clock * bits_per_pixel / 8; - avail = tc->link.base.num_lanes * tc->link.base.rate; + avail = tc->link.num_lanes * tc->link.rate; if (req > avail) return MODE_BAD; diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 0a580957c8cf..43abf01ebd4c 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -17,6 +17,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_dp_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c index 61cc2354ef1b..aa3198dc9903 100644 --- a/drivers/gpu/drm/bridge/ti-tfp410.c +++ b/drivers/gpu/drm/bridge/ti-tfp410.c @@ -14,6 +14,7 @@ #include <linux/platform_device.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c index 36a69aec8a4b..248c9f765c45 100644 --- a/drivers/gpu/drm/cirrus/cirrus.c +++ b/drivers/gpu/drm/cirrus/cirrus.c @@ -390,7 +390,7 @@ static int cirrus_conn_init(struct cirrus_device *cirrus) /* ------------------------------------------------------------------ */ /* cirrus (simple) display pipe */ -static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_crtc *crtc, +static enum drm_mode_status cirrus_pipe_mode_valid(struct drm_simple_display_pipe *pipe, const struct drm_display_mode *mode) { if (cirrus_check_size(mode->hdisplay, mode->vdisplay, NULL) < 0) @@ -510,7 +510,7 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus) /* ------------------------------------------------------------------ */ -DEFINE_DRM_GEM_SHMEM_FOPS(cirrus_fops); +DEFINE_DRM_GEM_FOPS(cirrus_fops); static struct drm_driver cirrus_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, @@ -532,7 +532,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev, struct cirrus_device *cirrus; int ret; - ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb"); + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "cirrusdrmfb"); if (ret) return ret; diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h deleted file mode 100644 index 1f73916e528e..000000000000 --- a/drivers/gpu/drm/cirrus/cirrus_drv.h +++ /dev/null @@ -1,247 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright 2012 Red Hat - * - * Authors: Matthew Garrett - * Dave Airlie - */ -#ifndef __CIRRUS_DRV_H__ -#define __CIRRUS_DRV_H__ - -#include <video/vga.h> - -#include <drm/drm_encoder.h> -#include <drm/drm_fb_helper.h> - -#include 
<drm/ttm/ttm_bo_api.h> -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_memory.h> -#include <drm/ttm/ttm_module.h> - -#include <drm/drm_gem.h> - -#define DRIVER_AUTHOR "Matthew Garrett" - -#define DRIVER_NAME "cirrus" -#define DRIVER_DESC "qemu Cirrus emulation" -#define DRIVER_DATE "20110418" - -#define DRIVER_MAJOR 1 -#define DRIVER_MINOR 0 -#define DRIVER_PATCHLEVEL 0 - -#define CIRRUSFB_CONN_LIMIT 1 - -#define RREG8(reg) ioread8(((void __iomem *)cdev->rmmio) + (reg)) -#define WREG8(reg, v) iowrite8(v, ((void __iomem *)cdev->rmmio) + (reg)) -#define RREG32(reg) ioread32(((void __iomem *)cdev->rmmio) + (reg)) -#define WREG32(reg, v) iowrite32(v, ((void __iomem *)cdev->rmmio) + (reg)) - -#define SEQ_INDEX 4 -#define SEQ_DATA 5 - -#define WREG_SEQ(reg, v) \ - do { \ - WREG8(SEQ_INDEX, reg); \ - WREG8(SEQ_DATA, v); \ - } while (0) \ - -#define CRT_INDEX 0x14 -#define CRT_DATA 0x15 - -#define WREG_CRT(reg, v) \ - do { \ - WREG8(CRT_INDEX, reg); \ - WREG8(CRT_DATA, v); \ - } while (0) \ - -#define GFX_INDEX 0xe -#define GFX_DATA 0xf - -#define WREG_GFX(reg, v) \ - do { \ - WREG8(GFX_INDEX, reg); \ - WREG8(GFX_DATA, v); \ - } while (0) \ - -/* - * Cirrus has a "hidden" DAC register that can be accessed by writing to - * the pixel mask register to reset the state, then reading from the register - * four times. The next write will then pass to the DAC - */ -#define VGA_DAC_MASK 0x6 - -#define WREG_HDR(v) \ - do { \ - RREG8(VGA_DAC_MASK); \ - RREG8(VGA_DAC_MASK); \ - RREG8(VGA_DAC_MASK); \ - RREG8(VGA_DAC_MASK); \ - WREG8(VGA_DAC_MASK, v); \ - } while (0) \ - - -#define CIRRUS_MAX_FB_HEIGHT 4096 -#define CIRRUS_MAX_FB_WIDTH 4096 - -#define CIRRUS_DPMS_CLEARED (-1) - -#define to_cirrus_crtc(x) container_of(x, struct cirrus_crtc, base) -#define to_cirrus_encoder(x) container_of(x, struct cirrus_encoder, base) - -struct cirrus_crtc { - struct drm_crtc base; - int last_dpms; - bool enabled; -}; - -struct cirrus_fbdev; -struct cirrus_mode_info { - struct cirrus_crtc *crtc; - /* pointer to fbdev info structure */ - struct cirrus_fbdev *gfbdev; -}; - -struct cirrus_encoder { - struct drm_encoder base; - int last_dpms; -}; - -struct cirrus_connector { - struct drm_connector base; -}; - -struct cirrus_mc { - resource_size_t vram_size; - resource_size_t vram_base; -}; - -struct cirrus_device { - struct drm_device *dev; - unsigned long flags; - - resource_size_t rmmio_base; - resource_size_t rmmio_size; - void __iomem *rmmio; - - struct cirrus_mc mc; - struct cirrus_mode_info mode_info; - - int num_crtc; - int fb_mtrr; - - struct { - struct ttm_bo_device bdev; - } ttm; - bool mm_inited; -}; - - -struct cirrus_fbdev { - struct drm_fb_helper helper; /* must be first */ - struct drm_framebuffer *gfb; - void *sysram; - int size; - int x1, y1, x2, y2; /* dirty rect */ - spinlock_t dirty_lock; -}; - -struct cirrus_bo { - struct ttm_buffer_object bo; - struct ttm_placement placement; - struct ttm_bo_kmap_obj kmap; - struct drm_gem_object gem; - struct ttm_place placements[3]; - int pin_count; -}; -#define gem_to_cirrus_bo(gobj) container_of((gobj), struct cirrus_bo, gem) - -static inline struct cirrus_bo * -cirrus_bo(struct ttm_buffer_object *bo) -{ - return container_of(bo, struct cirrus_bo, bo); -} - - -#define to_cirrus_obj(x) container_of(x, struct cirrus_gem_object, base) - - /* cirrus_main.c */ -int cirrus_device_init(struct cirrus_device *cdev, - struct drm_device *ddev, - struct pci_dev *pdev, - uint32_t flags); -void cirrus_device_fini(struct cirrus_device 
*cdev); -void cirrus_gem_free_object(struct drm_gem_object *obj); -int cirrus_dumb_mmap_offset(struct drm_file *file, - struct drm_device *dev, - uint32_t handle, - uint64_t *offset); -int cirrus_gem_create(struct drm_device *dev, - u32 size, bool iskernel, - struct drm_gem_object **obj); -int cirrus_dumb_create(struct drm_file *file, - struct drm_device *dev, - struct drm_mode_create_dumb *args); - -int cirrus_framebuffer_init(struct drm_device *dev, - struct drm_framebuffer *gfb, - const struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_gem_object *obj); - -bool cirrus_check_framebuffer(struct cirrus_device *cdev, int width, int height, - int bpp, int pitch); - - /* cirrus_display.c */ -int cirrus_modeset_init(struct cirrus_device *cdev); -void cirrus_modeset_fini(struct cirrus_device *cdev); - - /* cirrus_fbdev.c */ -int cirrus_fbdev_init(struct cirrus_device *cdev); -void cirrus_fbdev_fini(struct cirrus_device *cdev); - - - - /* cirrus_irq.c */ -void cirrus_driver_irq_preinstall(struct drm_device *dev); -int cirrus_driver_irq_postinstall(struct drm_device *dev); -void cirrus_driver_irq_uninstall(struct drm_device *dev); -irqreturn_t cirrus_driver_irq_handler(int irq, void *arg); - - /* cirrus_kms.c */ -int cirrus_driver_load(struct drm_device *dev, unsigned long flags); -void cirrus_driver_unload(struct drm_device *dev); -extern struct drm_ioctl_desc cirrus_ioctls[]; -extern int cirrus_max_ioctl; - -int cirrus_mm_init(struct cirrus_device *cirrus); -void cirrus_mm_fini(struct cirrus_device *cirrus); -void cirrus_ttm_placement(struct cirrus_bo *bo, int domain); -int cirrus_bo_create(struct drm_device *dev, int size, int align, - uint32_t flags, struct cirrus_bo **pcirrusbo); -int cirrus_mmap(struct file *filp, struct vm_area_struct *vma); - -static inline int cirrus_bo_reserve(struct cirrus_bo *bo, bool no_wait) -{ - int ret; - - ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL); - if (ret) { - if (ret != -ERESTARTSYS && ret != -EBUSY) - DRM_ERROR("reserve failed %p\n", bo); - return ret; - } - return 0; -} - -static inline void cirrus_bo_unreserve(struct cirrus_bo *bo) -{ - ttm_bo_unreserve(&bo->bo); -} - -int cirrus_bo_push_sysram(struct cirrus_bo *bo); -int cirrus_bo_pin(struct cirrus_bo *bo, u32 pl_flag, u64 *gpu_addr); - -extern int cirrus_bpp; - -#endif /* __CIRRUS_DRV_H__ */ diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 2dd2cd87cdbb..b191d39c071d 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -31,6 +31,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_uapi.h> +#include <drm/drm_bridge.h> #include <drm/drm_damage_helper.h> #include <drm/drm_device.h> #include <drm/drm_plane_helper.h> @@ -97,17 +98,6 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state, } } -/* - * For connectors that support multiple encoders, either the - * .atomic_best_encoder() or .best_encoder() operation must be implemented. 
- */ -static struct drm_encoder * -pick_single_encoder_for_connector(struct drm_connector *connector) -{ - WARN_ON(connector->encoder_ids[1]); - return drm_encoder_find(connector->dev, NULL, connector->encoder_ids[0]); -} - static int handle_conflicting_encoders(struct drm_atomic_state *state, bool disable_conflicting_encoders) { @@ -135,7 +125,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state, else if (funcs->best_encoder) new_encoder = funcs->best_encoder(connector); else - new_encoder = pick_single_encoder_for_connector(connector); + new_encoder = drm_connector_get_single_encoder(connector); if (new_encoder) { if (encoder_mask & drm_encoder_mask(new_encoder)) { @@ -359,7 +349,7 @@ update_connector_routing(struct drm_atomic_state *state, else if (funcs->best_encoder) new_encoder = funcs->best_encoder(connector); else - new_encoder = pick_single_encoder_for_connector(connector); + new_encoder = drm_connector_get_single_encoder(connector); if (!new_encoder) { DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", @@ -482,7 +472,7 @@ mode_fixup(struct drm_atomic_state *state) continue; funcs = crtc->helper_private; - if (!funcs->mode_fixup) + if (!funcs || !funcs->mode_fixup) continue; ret = funcs->mode_fixup(crtc, &new_crtc_state->mode, diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c index 7a26bfb5329c..0d466d3b0809 100644 --- a/drivers/gpu/drm/drm_atomic_uapi.c +++ b/drivers/gpu/drm/drm_atomic_uapi.c @@ -1405,7 +1405,7 @@ retry: } else if (arg->flags & DRM_MODE_ATOMIC_NONBLOCK) { ret = drm_atomic_nonblocking_commit(state); } else { - if (unlikely(drm_debug & DRM_UT_STATE)) + if (drm_debug_enabled(DRM_UT_STATE)) drm_atomic_print_state(state); ret = drm_atomic_commit(state); diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c index 37ac168fcb60..121481f6aa71 100644 --- a/drivers/gpu/drm/drm_blend.c +++ b/drivers/gpu/drm/drm_blend.c @@ -130,7 +130,12 @@ * Z position is set up with drm_plane_create_zpos_immutable_property() and * drm_plane_create_zpos_property(). It controls the visibility of overlapping * planes. Without this property the primary plane is always below the cursor - * plane, and ordering between all other planes is undefined. + * plane, and ordering between all other planes is undefined. The positive + * Z axis points towards the user, i.e. planes with lower Z position values + * are underneath planes with higher Z position values. Two planes with the + * same Z position value have undefined ordering. Note that the Z position + * value can also be immutable, to inform userspace about the hard-coded + * stacking of planes, see drm_plane_create_zpos_immutable_property(). * * pixel blend mode: * Pixel blend mode is set up with drm_plane_create_blend_mode_property(). 
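The drm_blend.c comment just extended above spells out the zpos semantics: higher values stack closer to the viewer, equal values give undefined ordering, and immutable values advertise a hard-coded hardware stacking. As an illustration only (the foo_ prefix is hypothetical; the two drm_plane_create_zpos_*_property() helpers and DRM_PLANE_TYPE_* are the existing core API the comment refers to), a driver exposing that stacking during plane setup might look roughly like this:

static void foo_attach_zpos_properties(struct drm_device *dev)
{
	struct drm_plane *plane;

	drm_for_each_plane(plane, dev) {
		switch (plane->type) {
		case DRM_PLANE_TYPE_PRIMARY:
			/* Fixed at the bottom of the stack. */
			drm_plane_create_zpos_immutable_property(plane, 0);
			break;
		case DRM_PLANE_TYPE_CURSOR:
			/* Fixed on top; positive Z points towards the user. */
			drm_plane_create_zpos_immutable_property(plane, 255);
			break;
		default:
			/* Overlays may be restacked by userspace via zpos. */
			drm_plane_create_zpos_property(plane, 1, 1, 254);
			break;
		}
	}
}

Userspace can then read the immutable zpos values to learn the fixed ordering and only needs to program zpos on the mutable overlay planes.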
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 3bd76e918b5d..03e01b000f7a 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -62,10 +62,10 @@ static void drm_cache_flush_clflush(struct page *pages[], { unsigned long i; - mb(); + mb(); /*Full memory barrier used before so that CLFLUSH is ordered*/ for (i = 0; i < num_pages; i++) drm_clflush_page(*pages++); - mb(); + mb(); /*Also used after CLFLUSH so that all cache is flushed*/ } #endif @@ -92,6 +92,7 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages) #elif defined(__powerpc__) unsigned long i; + for (i = 0; i < num_pages; i++) { struct page *page = pages[i]; void *page_virtual; @@ -125,10 +126,10 @@ drm_clflush_sg(struct sg_table *st) if (static_cpu_has(X86_FEATURE_CLFLUSH)) { struct sg_page_iter sg_iter; - mb(); + mb(); /*CLFLUSH is ordered only by using memory barriers*/ for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) drm_clflush_page(sg_page_iter_page(&sg_iter)); - mb(); + mb(); /*Make sure that all cache line entry is flushed*/ return; } @@ -157,12 +158,13 @@ drm_clflush_virt_range(void *addr, unsigned long length) if (static_cpu_has(X86_FEATURE_CLFLUSH)) { const int size = boot_cpu_data.x86_clflush_size; void *end = addr + length; + addr = (void *)(((unsigned long)addr) & -size); - mb(); + mb(); /*CLFLUSH is only ordered with a full memory barrier*/ for (; addr < end; addr += size) clflushopt(addr); clflushopt(end - 1); /* force serialisation */ - mb(); + mb(); /*Ensure that evry data cache line entry is flushed*/ return; } diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c index c8922b7cac09..895b73f23079 100644 --- a/drivers/gpu/drm/drm_client_modeset.c +++ b/drivers/gpu/drm/drm_client_modeset.c @@ -415,9 +415,8 @@ static bool connector_has_possible_crtc(struct drm_connector *connector, struct drm_crtc *crtc) { struct drm_encoder *encoder; - int i; - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { if (encoder->possible_crtcs & drm_crtc_mask(crtc)) return true; } diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index 4a8b2e5c2af6..2166000ed057 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -365,8 +365,6 @@ EXPORT_SYMBOL(drm_connector_attach_edid_property); int drm_connector_attach_encoder(struct drm_connector *connector, struct drm_encoder *encoder) { - int i; - /* * In the past, drivers have attempted to model the static association * of connector to encoder in simple connector/encoder devices using a @@ -381,18 +379,15 @@ int drm_connector_attach_encoder(struct drm_connector *connector, if (WARN_ON(connector->encoder)) return -EINVAL; - for (i = 0; i < ARRAY_SIZE(connector->encoder_ids); i++) { - if (connector->encoder_ids[i] == 0) { - connector->encoder_ids[i] = encoder->base.id; - return 0; - } - } - return -ENOMEM; + connector->possible_encoders |= drm_encoder_mask(encoder); + + return 0; } EXPORT_SYMBOL(drm_connector_attach_encoder); /** - * drm_connector_has_possible_encoder - check if the connector and encoder are assosicated with each other + * drm_connector_has_possible_encoder - check if the connector and encoder are + * associated with each other * @connector: the connector * @encoder: the encoder * @@ -402,15 +397,7 @@ EXPORT_SYMBOL(drm_connector_attach_encoder); bool drm_connector_has_possible_encoder(struct drm_connector *connector, struct drm_encoder 
*encoder) { - struct drm_encoder *enc; - int i; - - drm_connector_for_each_possible_encoder(connector, enc, i) { - if (enc == encoder) - return true; - } - - return false; + return connector->possible_encoders & drm_encoder_mask(encoder); } EXPORT_SYMBOL(drm_connector_has_possible_encoder); @@ -480,7 +467,10 @@ EXPORT_SYMBOL(drm_connector_cleanup); * drm_connector_register - register a connector * @connector: the connector to register * - * Register userspace interfaces for a connector + * Register userspace interfaces for a connector. Only call this for connectors + * which can be hotplugged after drm_dev_register() has been called already, + * e.g. DP MST connectors. All other connectors will be registered automatically + * when calling drm_dev_register(). * * Returns: * Zero on success, error code on failure. @@ -526,7 +516,10 @@ EXPORT_SYMBOL(drm_connector_register); * drm_connector_unregister - unregister a connector * @connector: the connector to unregister * - * Unregister userspace interfaces for a connector + * Unregister userspace interfaces for a connector. Only call this for + * connectors which have registered explicitly by calling drm_dev_register(), + * since connectors are unregistered automatically when drm_dev_unregister() is + * called. */ void drm_connector_unregister(struct drm_connector *connector) { @@ -882,6 +875,38 @@ static const struct drm_prop_enum_list hdmi_colorspaces[] = { { DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER, "DCI-P3_RGB_Theater" }, }; +/* + * As per DP 1.4a spec, 2.2.5.7.5 VSC SDP Payload for Pixel Encoding/Colorimetry + * Format Table 2-120 + */ +static const struct drm_prop_enum_list dp_colorspaces[] = { + /* For Default case, driver will set the colorspace */ + { DRM_MODE_COLORIMETRY_DEFAULT, "Default" }, + { DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED, "RGB_Wide_Gamut_Fixed_Point" }, + /* Colorimetry based on scRGB (IEC 61966-2-2) */ + { DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT, "RGB_Wide_Gamut_Floating_Point" }, + /* Colorimetry based on IEC 61966-2-5 */ + { DRM_MODE_COLORIMETRY_OPRGB, "opRGB" }, + /* Colorimetry based on SMPTE RP 431-2 */ + { DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65, "DCI-P3_RGB_D65" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_RGB, "BT2020_RGB" }, + { DRM_MODE_COLORIMETRY_BT601_YCC, "BT601_YCC" }, + { DRM_MODE_COLORIMETRY_BT709_YCC, "BT709_YCC" }, + /* Standard Definition Colorimetry based on IEC 61966-2-4 */ + { DRM_MODE_COLORIMETRY_XVYCC_601, "XVYCC_601" }, + /* High Definition Colorimetry based on IEC 61966-2-4 */ + { DRM_MODE_COLORIMETRY_XVYCC_709, "XVYCC_709" }, + /* Colorimetry based on IEC 61966-2-1/Amendment 1 */ + { DRM_MODE_COLORIMETRY_SYCC_601, "SYCC_601" }, + /* Colorimetry based on IEC 61966-2-5 [33] */ + { DRM_MODE_COLORIMETRY_OPYCC_601, "opYCC_601" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_CYCC, "BT2020_CYCC" }, + /* Colorimetry based on ITU-R BT.2020 */ + { DRM_MODE_COLORIMETRY_BT2020_YCC, "BT2020_YCC" }, +}; + /** * DOC: standard connector properties * @@ -1674,7 +1699,6 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); * DOC: standard connector properties * * Colorspace: - * drm_mode_create_colorspace_property - create colorspace property * This property helps select a suitable colorspace based on the sink * capability. Modern sink devices support wider gamut like BT2020. 
* This helps switch to BT2020 mode if the BT2020 encoded video stream @@ -1694,32 +1718,68 @@ EXPORT_SYMBOL(drm_mode_create_aspect_ratio_property); * - This property is just to inform sink what colorspace * source is trying to drive. * + * Because between HDMI and DP have different colorspaces, + * drm_mode_create_hdmi_colorspace_property() is used for HDMI connector and + * drm_mode_create_dp_colorspace_property() is used for DP connector. + */ + +/** + * drm_mode_create_hdmi_colorspace_property - create hdmi colorspace property + * @connector: connector to create the Colorspace property on. + * * Called by a driver the first time it's needed, must be attached to desired - * connectors. + * HDMI connectors. + * + * Returns: + * Zero on success, negative errono on failure. */ -int drm_mode_create_colorspace_property(struct drm_connector *connector) +int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector) { struct drm_device *dev = connector->dev; - struct drm_property *prop; - if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA || - connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) { - prop = drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, - "Colorspace", - hdmi_colorspaces, - ARRAY_SIZE(hdmi_colorspaces)); - if (!prop) - return -ENOMEM; - } else { - DRM_DEBUG_KMS("Colorspace property not supported\n"); + if (connector->colorspace_property) return 0; - } - connector->colorspace_property = prop; + connector->colorspace_property = + drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace", + hdmi_colorspaces, + ARRAY_SIZE(hdmi_colorspaces)); + + if (!connector->colorspace_property) + return -ENOMEM; + + return 0; +} +EXPORT_SYMBOL(drm_mode_create_hdmi_colorspace_property); + +/** + * drm_mode_create_dp_colorspace_property - create dp colorspace property + * @connector: connector to create the Colorspace property on. + * + * Called by a driver the first time it's needed, must be attached to desired + * DP connectors. + * + * Returns: + * Zero on success, negative errono on failure. 
+ */ +int drm_mode_create_dp_colorspace_property(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + + if (connector->colorspace_property) + return 0; + + connector->colorspace_property = + drm_property_create_enum(dev, DRM_MODE_PROP_ENUM, "Colorspace", + dp_colorspaces, + ARRAY_SIZE(dp_colorspaces)); + + if (!connector->colorspace_property) + return -ENOMEM; return 0; } -EXPORT_SYMBOL(drm_mode_create_colorspace_property); +EXPORT_SYMBOL(drm_mode_create_dp_colorspace_property); /** * drm_mode_create_content_type_property - create content type property @@ -2121,7 +2181,6 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, int encoders_count = 0; int ret = 0; int copied = 0; - int i; struct drm_mode_modeinfo u_mode; struct drm_mode_modeinfo __user *mode_ptr; uint32_t __user *encoder_ptr; @@ -2136,14 +2195,13 @@ int drm_mode_getconnector(struct drm_device *dev, void *data, if (!connector) return -ENOENT; - drm_connector_for_each_possible_encoder(connector, encoder, i) - encoders_count++; + encoders_count = hweight32(connector->possible_encoders); if ((out_resp->count_encoders >= encoders_count) && encoders_count) { copied = 0; encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr); - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { if (put_user(encoder->base.id, encoder_ptr + copied)) { ret = -EFAULT; goto out; diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index 80ddf13ad996..499b05aaccfc 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -36,6 +36,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_atomic_uapi.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> @@ -459,6 +460,22 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) __drm_helper_disable_unused_functions(dev); } +/* + * For connectors that support multiple encoders, either the + * .atomic_best_encoder() or .best_encoder() operation must be implemented. 
+ */ +struct drm_encoder * +drm_connector_get_single_encoder(struct drm_connector *connector) +{ + struct drm_encoder *encoder; + + WARN_ON(hweight32(connector->possible_encoders) > 1); + drm_connector_for_each_possible_encoder(connector, encoder) + return encoder; + + return NULL; +} + /** * drm_crtc_helper_set_config - set a new config from userspace * @set: mode set configuration @@ -624,7 +641,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set, new_encoder = connector->encoder; for (ro = 0; ro < set->num_connectors; ro++) { if (set->connectors[ro] == connector) { - new_encoder = connector_funcs->best_encoder(connector); + if (connector_funcs->best_encoder) + new_encoder = connector_funcs->best_encoder(connector); + else + new_encoder = drm_connector_get_single_encoder(connector); + /* if we can't get an encoder for a connector we are setting now - then fail */ if (new_encoder == NULL) diff --git a/drivers/gpu/drm/drm_crtc_helper_internal.h b/drivers/gpu/drm/drm_crtc_helper_internal.h index b5ac1581e623..f0a66ef47e5a 100644 --- a/drivers/gpu/drm/drm_crtc_helper_internal.h +++ b/drivers/gpu/drm/drm_crtc_helper_internal.h @@ -75,3 +75,6 @@ enum drm_mode_status drm_encoder_mode_valid(struct drm_encoder *encoder, const struct drm_display_mode *mode); enum drm_mode_status drm_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode); + +struct drm_encoder * +drm_connector_get_single_encoder(struct drm_connector *connector); diff --git a/drivers/gpu/drm/drm_damage_helper.c b/drivers/gpu/drm/drm_damage_helper.c index 8230dac01a89..3a4126dc2520 100644 --- a/drivers/gpu/drm/drm_damage_helper.c +++ b/drivers/gpu/drm/drm_damage_helper.c @@ -212,8 +212,14 @@ retry: drm_for_each_plane(plane, fb->dev) { struct drm_plane_state *plane_state; - if (plane->state->fb != fb) + ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx); + if (ret) + goto out; + + if (plane->state->fb != fb) { + drm_modeset_unlock(&plane->mutex); continue; + } plane_state = drm_atomic_get_plane_state(state, plane); if (IS_ERR(plane_state)) { diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c index be1b7ba92ffe..ca3c55c6b815 100644 --- a/drivers/gpu/drm/drm_debugfs_crc.c +++ b/drivers/gpu/drm/drm_debugfs_crc.c @@ -334,19 +334,17 @@ static ssize_t crtc_crc_read(struct file *filep, char __user *user_buf, return LINE_LEN(crc->values_cnt); } -static unsigned int crtc_crc_poll(struct file *file, poll_table *wait) +static __poll_t crtc_crc_poll(struct file *file, poll_table *wait) { struct drm_crtc *crtc = file->f_inode->i_private; struct drm_crtc_crc *crc = &crtc->crc; - unsigned ret; + __poll_t ret = 0; poll_wait(file, &crc->wq, wait); spin_lock_irq(&crc->lock); if (crc->source && crtc_crc_data_count(crc)) - ret = POLLIN | POLLRDNORM; - else - ret = 0; + ret |= EPOLLIN | EPOLLRDNORM; spin_unlock_irq(&crc->lock); return ret; diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c index b15cee85b702..3ab2609f9ec7 100644 --- a/drivers/gpu/drm/drm_dp_cec.c +++ b/drivers/gpu/drm/drm_dp_cec.c @@ -8,9 +8,13 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> -#include <drm/drm_dp_helper.h> + #include <media/cec.h> +#include <drm/drm_connector.h> +#include <drm/drm_device.h> +#include <drm/drm_dp_helper.h> + /* * Unfortunately it turns out that we have a chicken-and-egg situation * here. 
Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters @@ -295,7 +299,10 @@ static void drm_dp_cec_unregister_work(struct work_struct *work) */ void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid) { - u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD; + struct drm_connector *connector = aux->cec.connector; + u32 cec_caps = CEC_CAP_DEFAULTS | CEC_CAP_NEEDS_HPD | + CEC_CAP_CONNECTOR_INFO; + struct cec_connector_info conn_info; unsigned int num_las = 1; u8 cap; @@ -344,13 +351,17 @@ void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid) /* Create a new adapter */ aux->cec.adap = cec_allocate_adapter(&drm_dp_cec_adap_ops, - aux, aux->cec.name, cec_caps, + aux, connector->name, cec_caps, num_las); if (IS_ERR(aux->cec.adap)) { aux->cec.adap = NULL; goto unlock; } - if (cec_register_adapter(aux->cec.adap, aux->cec.parent)) { + + cec_fill_conn_info_from_drm(&conn_info, connector); + cec_s_conn_info(aux->cec.adap, &conn_info); + + if (cec_register_adapter(aux->cec.adap, connector->dev->dev)) { cec_delete_adapter(aux->cec.adap); aux->cec.adap = NULL; } else { @@ -406,22 +417,20 @@ EXPORT_SYMBOL(drm_dp_cec_unset_edid); /** * drm_dp_cec_register_connector() - register a new connector * @aux: DisplayPort AUX channel - * @name: name of the CEC device - * @parent: parent device + * @connector: drm connector * * A new connector was registered with associated CEC adapter name and * CEC adapter parent device. After registering the name and parent * drm_dp_cec_set_edid() is called to check if the connector supports * CEC and to register a CEC adapter if that is the case. */ -void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name, - struct device *parent) +void drm_dp_cec_register_connector(struct drm_dp_aux *aux, + struct drm_connector *connector) { WARN_ON(aux->cec.adap); if (WARN_ON(!aux->transfer)) return; - aux->cec.name = name; - aux->cec.parent = parent; + aux->cec.connector = connector; INIT_DELAYED_WORK(&aux->cec.unregister_work, drm_dp_cec_unregister_work); } diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c index ffc68d305afe..2c7870aef469 100644 --- a/drivers/gpu/drm/drm_dp_helper.c +++ b/drivers/gpu/drm/drm_dp_helper.c @@ -120,33 +120,49 @@ u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SI } EXPORT_SYMBOL(drm_dp_get_adjust_request_pre_emphasis); -void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) { - int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & - DP_TRAINING_AUX_RD_MASK; +u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE], + unsigned int lane) +{ + unsigned int offset = DP_ADJUST_REQUEST_POST_CURSOR2; + u8 value = dp_link_status(link_status, offset); + + return (value >> (lane << 1)) & 0x3; +} +EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor); + +void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; if (rd_interval > 4) - DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n", + DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n", rd_interval); if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14) - udelay(100); + rd_interval = 100; else - mdelay(rd_interval * 4); + rd_interval *= 4 * USEC_PER_MSEC; + + usleep_range(rd_interval, rd_interval * 2); } EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay); -void drm_dp_link_train_channel_eq_delay(const u8 
dpcd[DP_RECEIVER_CAP_SIZE]) { - int rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & - DP_TRAINING_AUX_RD_MASK; +void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]) +{ + unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; if (rd_interval > 4) - DRM_DEBUG_KMS("AUX interval %d, out of range (max 4)\n", + DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n", rd_interval); if (rd_interval == 0) - udelay(400); + rd_interval = 400; else - mdelay(rd_interval * 4); + rd_interval *= 4 * USEC_PER_MSEC; + + usleep_range(rd_interval, rd_interval * 2); } EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay); @@ -220,7 +236,6 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request, } ret = aux->transfer(aux, &msg); - if (ret >= 0) { native_reply = msg.reply & DP_AUX_NATIVE_REPLY_MASK; if (native_reply == DP_AUX_NATIVE_REPLY_ACK) { @@ -337,134 +352,6 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, EXPORT_SYMBOL(drm_dp_dpcd_read_link_status); /** - * drm_dp_link_probe() - probe a DisplayPort link for capabilities - * @aux: DisplayPort AUX channel - * @link: pointer to structure in which to return link capabilities - * - * The structure filled in by this function can usually be passed directly - * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and - * configure the link based on the link's capabilities. - * - * Returns 0 on success or a negative error code on failure. - */ -int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link) -{ - u8 values[3]; - int err; - - memset(link, 0, sizeof(*link)); - - err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values)); - if (err < 0) - return err; - - link->revision = values[0]; - link->rate = drm_dp_bw_code_to_link_rate(values[1]); - link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK; - - if (values[2] & DP_ENHANCED_FRAME_CAP) - link->capabilities |= DP_LINK_CAP_ENHANCED_FRAMING; - - return 0; -} -EXPORT_SYMBOL(drm_dp_link_probe); - -/** - * drm_dp_link_power_up() - power up a DisplayPort link - * @aux: DisplayPort AUX channel - * @link: pointer to a structure containing the link configuration - * - * Returns 0 on success or a negative error code on failure. - */ -int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link) -{ - u8 value; - int err; - - /* DP_SET_POWER register is only available on DPCD v1.1 and later */ - if (link->revision < 0x11) - return 0; - - err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); - if (err < 0) - return err; - - value &= ~DP_SET_POWER_MASK; - value |= DP_SET_POWER_D0; - - err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); - if (err < 0) - return err; - - /* - * According to the DP 1.1 specification, a "Sink Device must exit the - * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink - * Control Field" (register 0x600). - */ - usleep_range(1000, 2000); - - return 0; -} -EXPORT_SYMBOL(drm_dp_link_power_up); - -/** - * drm_dp_link_power_down() - power down a DisplayPort link - * @aux: DisplayPort AUX channel - * @link: pointer to a structure containing the link configuration - * - * Returns 0 on success or a negative error code on failure. 
- */ -int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link) -{ - u8 value; - int err; - - /* DP_SET_POWER register is only available on DPCD v1.1 and later */ - if (link->revision < 0x11) - return 0; - - err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); - if (err < 0) - return err; - - value &= ~DP_SET_POWER_MASK; - value |= DP_SET_POWER_D3; - - err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); - if (err < 0) - return err; - - return 0; -} -EXPORT_SYMBOL(drm_dp_link_power_down); - -/** - * drm_dp_link_configure() - configure a DisplayPort link - * @aux: DisplayPort AUX channel - * @link: pointer to a structure containing the link configuration - * - * Returns 0 on success or a negative error code on failure. - */ -int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link) -{ - u8 values[2]; - int err; - - values[0] = drm_dp_link_rate_to_bw_code(link->rate); - values[1] = link->num_lanes; - - if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) - values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; - - err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); - if (err < 0) - return err; - - return 0; -} -EXPORT_SYMBOL(drm_dp_link_configure); - -/** * drm_dp_downstream_max_clock() - extract branch device max * pixel rate for legacy VGA * converter or max TMDS clock @@ -1109,6 +996,14 @@ EXPORT_SYMBOL(drm_dp_aux_init); * @aux: DisplayPort AUX channel * * Automatically calls drm_dp_aux_init() if this hasn't been done yet. + * This should only be called when the underlying &struct drm_connector is + * initialiazed already. Therefore the best place to call this is from + * &drm_connector_funcs.late_register. Not that drivers which don't follow this + * will Oops when CONFIG_DRM_DP_AUX_CHARDEV is enabled. + * + * Drivers which need to use the aux channel before that point (e.g. at driver + * load time, before drm_dev_register() has been called) need to call + * drm_dp_aux_init(). * * Returns 0 on success or a negative error code on failure. */ diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 82add736e17d..ae5809a1f19a 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -28,15 +28,22 @@ #include <linux/sched.h> #include <linux/seq_file.h> +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) +#include <linux/stacktrace.h> +#include <linux/sort.h> +#include <linux/timekeeping.h> +#include <linux/math64.h> +#endif + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_dp_mst_helper.h> #include <drm/drm_drv.h> -#include <drm/drm_fixed.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include "drm_crtc_helper_internal.h" +#include "drm_dp_mst_topology_internal.h" /** * DOC: dp mst helper @@ -45,9 +52,14 @@ * protocol. The helpers contain a topology manager and bandwidth manager. * The helpers encapsulate the sending and received of sideband msgs. 
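The kernel-doc added to drm_dp_aux_register() above says the AUX channel should be registered from &drm_connector_funcs.late_register, once the connector is initialized, with drm_dp_aux_init() covering any earlier use at driver load time. A hedged sketch of that pattern; struct example_dp and connector_to_example_dp() are invented for illustration, only the drm_dp_aux_* calls come from the documentation:

#include <linux/kernel.h>
#include <drm/drm_connector.h>
#include <drm/drm_dp_helper.h>

struct example_dp {
	struct drm_dp_aux aux;
	struct drm_connector connector;
};

#define connector_to_example_dp(c) \
	container_of(c, struct example_dp, connector)

static int example_connector_late_register(struct drm_connector *connector)
{
	struct example_dp *dp = connector_to_example_dp(connector);

	/* The connector exists by now, so the aux chardev can be created. */
	return drm_dp_aux_register(&dp->aux);
}

static void example_connector_early_unregister(struct drm_connector *connector)
{
	struct example_dp *dp = connector_to_example_dp(connector);

	drm_dp_aux_unregister(&dp->aux);
}

static const struct drm_connector_funcs example_connector_funcs = {
	/* ...the usual callbacks... */
	.late_register = example_connector_late_register,
	.early_unregister = example_connector_early_unregister,
};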
*/ +struct drm_dp_pending_up_req { + struct drm_dp_sideband_msg_hdr hdr; + struct drm_dp_sideband_msg_req_body msg; + struct list_head next; +}; + static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, char *buf); -static int test_calc_pbn_mode(void); static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port); @@ -62,8 +74,8 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); -static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb); +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); @@ -74,6 +86,8 @@ static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux); static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux); static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr); +#define DBG_PREFIX "[dp_mst]" + #define DP_STR(x) [DP_ ## x] = #x static const char *drm_dp_mst_req_type_str(u8 req_type) @@ -130,6 +144,43 @@ static const char *drm_dp_mst_nak_reason_str(u8 nak_reason) } #undef DP_STR +#define DP_STR(x) [DRM_DP_SIDEBAND_TX_ ## x] = #x + +static const char *drm_dp_mst_sideband_tx_state_str(int state) +{ + static const char * const sideband_reason_str[] = { + DP_STR(QUEUED), + DP_STR(START_SEND), + DP_STR(SENT), + DP_STR(RX), + DP_STR(TIMEOUT), + }; + + if (state >= ARRAY_SIZE(sideband_reason_str) || + !sideband_reason_str[state]) + return "unknown"; + + return sideband_reason_str[state]; +} + +static int +drm_dp_mst_rad_to_str(const u8 rad[8], u8 lct, char *out, size_t len) +{ + int i; + u8 unpacked_rad[16]; + + for (i = 0; i < lct; i++) { + if (i % 2) + unpacked_rad[i] = rad[i / 2] >> 4; + else + unpacked_rad[i] = rad[i / 2] & BIT_MASK(4); + } + + /* TODO: Eventually add something to printk so we can format the rad + * like this: 1.2.3 + */ + return snprintf(out, len, "%*phC", lct, unpacked_rad); +} /* sideband msg handling */ static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles) @@ -262,8 +313,9 @@ static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr, return true; } -static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req, - struct drm_dp_sideband_msg_tx *raw) +void +drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req, + struct drm_dp_sideband_msg_tx *raw) { int idx = 0; int i; @@ -272,6 +324,8 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req, switch (req->req_type) { case DP_ENUM_PATH_RESOURCES: + case DP_POWER_DOWN_PHY: + case DP_POWER_UP_PHY: buf[idx] = (req->u.port_num.port_number & 0xf) << 4; idx++; break; @@ -359,14 +413,253 @@ static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req, memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes); idx += req->u.i2c_write.num_bytes; break; + } + raw->cur_len = idx; +} +EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_encode_sideband_req); + +/* Decode a sideband request we've encoded, mainly used for debugging */ +int +drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw, + struct drm_dp_sideband_msg_req_body *req) +{ + const u8 *buf = raw->msg; + int i, idx = 0; + req->req_type = buf[idx++] & 0x7f; + switch (req->req_type) { + case DP_ENUM_PATH_RESOURCES: case DP_POWER_DOWN_PHY: case 
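drm_dp_mst_rad_to_str() above prints the relative address (RAD) of a branch device; the RAD packs one 4-bit hop per nibble, two hops per byte. A purely illustrative unpacking helper mirroring the loop above (the function name is made up):

static void example_unpack_rad(const u8 rad[8], u8 lct, u8 hops[16])
{
	int i;

	for (i = 0; i < lct; i++) {
		if (i % 2)
			hops[i] = rad[i / 2] >> 4;	/* high nibble */
		else
			hops[i] = rad[i / 2] & 0xf;	/* low nibble */
	}
}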
DP_POWER_UP_PHY: - buf[idx] = (req->u.port_num.port_number & 0xf) << 4; - idx++; + req->u.port_num.port_number = (buf[idx] >> 4) & 0xf; + break; + case DP_ALLOCATE_PAYLOAD: + { + struct drm_dp_allocate_payload *a = + &req->u.allocate_payload; + + a->number_sdp_streams = buf[idx] & 0xf; + a->port_number = (buf[idx] >> 4) & 0xf; + + WARN_ON(buf[++idx] & 0x80); + a->vcpi = buf[idx] & 0x7f; + + a->pbn = buf[++idx] << 8; + a->pbn |= buf[++idx]; + + idx++; + for (i = 0; i < a->number_sdp_streams; i++) { + a->sdp_stream_sink[i] = + (buf[idx + (i / 2)] >> ((i % 2) ? 0 : 4)) & 0xf; + } + } + break; + case DP_QUERY_PAYLOAD: + req->u.query_payload.port_number = (buf[idx] >> 4) & 0xf; + WARN_ON(buf[++idx] & 0x80); + req->u.query_payload.vcpi = buf[idx] & 0x7f; + break; + case DP_REMOTE_DPCD_READ: + { + struct drm_dp_remote_dpcd_read *r = &req->u.dpcd_read; + + r->port_number = (buf[idx] >> 4) & 0xf; + + r->dpcd_address = (buf[idx] << 16) & 0xf0000; + r->dpcd_address |= (buf[++idx] << 8) & 0xff00; + r->dpcd_address |= buf[++idx] & 0xff; + + r->num_bytes = buf[++idx]; + } + break; + case DP_REMOTE_DPCD_WRITE: + { + struct drm_dp_remote_dpcd_write *w = + &req->u.dpcd_write; + + w->port_number = (buf[idx] >> 4) & 0xf; + + w->dpcd_address = (buf[idx] << 16) & 0xf0000; + w->dpcd_address |= (buf[++idx] << 8) & 0xff00; + w->dpcd_address |= buf[++idx] & 0xff; + + w->num_bytes = buf[++idx]; + + w->bytes = kmemdup(&buf[++idx], w->num_bytes, + GFP_KERNEL); + if (!w->bytes) + return -ENOMEM; + } + break; + case DP_REMOTE_I2C_READ: + { + struct drm_dp_remote_i2c_read *r = &req->u.i2c_read; + struct drm_dp_remote_i2c_read_tx *tx; + bool failed = false; + + r->num_transactions = buf[idx] & 0x3; + r->port_number = (buf[idx] >> 4) & 0xf; + for (i = 0; i < r->num_transactions; i++) { + tx = &r->transactions[i]; + + tx->i2c_dev_id = buf[++idx] & 0x7f; + tx->num_bytes = buf[++idx]; + tx->bytes = kmemdup(&buf[++idx], + tx->num_bytes, + GFP_KERNEL); + if (!tx->bytes) { + failed = true; + break; + } + idx += tx->num_bytes; + tx->no_stop_bit = (buf[idx] >> 5) & 0x1; + tx->i2c_transaction_delay = buf[idx] & 0xf; + } + + if (failed) { + for (i = 0; i < r->num_transactions; i++) + kfree(tx->bytes); + return -ENOMEM; + } + + r->read_i2c_device_id = buf[++idx] & 0x7f; + r->num_bytes_read = buf[++idx]; + } + break; + case DP_REMOTE_I2C_WRITE: + { + struct drm_dp_remote_i2c_write *w = &req->u.i2c_write; + + w->port_number = (buf[idx] >> 4) & 0xf; + w->write_i2c_device_id = buf[++idx] & 0x7f; + w->num_bytes = buf[++idx]; + w->bytes = kmemdup(&buf[++idx], w->num_bytes, + GFP_KERNEL); + if (!w->bytes) + return -ENOMEM; + } + break; + } + + return 0; +} +EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_decode_sideband_req); + +void +drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req, + int indent, struct drm_printer *printer) +{ + int i; + +#define P(f, ...) 
drm_printf_indent(printer, indent, f, ##__VA_ARGS__) + if (req->req_type == DP_LINK_ADDRESS) { + /* No contents to print */ + P("type=%s\n", drm_dp_mst_req_type_str(req->req_type)); + return; + } + + P("type=%s contents:\n", drm_dp_mst_req_type_str(req->req_type)); + indent++; + + switch (req->req_type) { + case DP_ENUM_PATH_RESOURCES: + case DP_POWER_DOWN_PHY: + case DP_POWER_UP_PHY: + P("port=%d\n", req->u.port_num.port_number); + break; + case DP_ALLOCATE_PAYLOAD: + P("port=%d vcpi=%d pbn=%d sdp_streams=%d %*ph\n", + req->u.allocate_payload.port_number, + req->u.allocate_payload.vcpi, req->u.allocate_payload.pbn, + req->u.allocate_payload.number_sdp_streams, + req->u.allocate_payload.number_sdp_streams, + req->u.allocate_payload.sdp_stream_sink); + break; + case DP_QUERY_PAYLOAD: + P("port=%d vcpi=%d\n", + req->u.query_payload.port_number, + req->u.query_payload.vcpi); + break; + case DP_REMOTE_DPCD_READ: + P("port=%d dpcd_addr=%05x len=%d\n", + req->u.dpcd_read.port_number, req->u.dpcd_read.dpcd_address, + req->u.dpcd_read.num_bytes); + break; + case DP_REMOTE_DPCD_WRITE: + P("port=%d addr=%05x len=%d: %*ph\n", + req->u.dpcd_write.port_number, + req->u.dpcd_write.dpcd_address, + req->u.dpcd_write.num_bytes, req->u.dpcd_write.num_bytes, + req->u.dpcd_write.bytes); + break; + case DP_REMOTE_I2C_READ: + P("port=%d num_tx=%d id=%d size=%d:\n", + req->u.i2c_read.port_number, + req->u.i2c_read.num_transactions, + req->u.i2c_read.read_i2c_device_id, + req->u.i2c_read.num_bytes_read); + + indent++; + for (i = 0; i < req->u.i2c_read.num_transactions; i++) { + const struct drm_dp_remote_i2c_read_tx *rtx = + &req->u.i2c_read.transactions[i]; + + P("%d: id=%03d size=%03d no_stop_bit=%d tx_delay=%03d: %*ph\n", + i, rtx->i2c_dev_id, rtx->num_bytes, + rtx->no_stop_bit, rtx->i2c_transaction_delay, + rtx->num_bytes, rtx->bytes); + } + break; + case DP_REMOTE_I2C_WRITE: + P("port=%d id=%d size=%d: %*ph\n", + req->u.i2c_write.port_number, + req->u.i2c_write.write_i2c_device_id, + req->u.i2c_write.num_bytes, req->u.i2c_write.num_bytes, + req->u.i2c_write.bytes); + break; + default: + P("???\n"); + break; + } +#undef P +} +EXPORT_SYMBOL_FOR_TESTS_ONLY(drm_dp_dump_sideband_msg_req_body); + +static inline void +drm_dp_mst_dump_sideband_msg_tx(struct drm_printer *p, + const struct drm_dp_sideband_msg_tx *txmsg) +{ + struct drm_dp_sideband_msg_req_body req; + char buf[64]; + int ret; + int i; + + drm_dp_mst_rad_to_str(txmsg->dst->rad, txmsg->dst->lct, buf, + sizeof(buf)); + drm_printf(p, "txmsg cur_offset=%x cur_len=%x seqno=%x state=%s path_msg=%d dst=%s\n", + txmsg->cur_offset, txmsg->cur_len, txmsg->seqno, + drm_dp_mst_sideband_tx_state_str(txmsg->state), + txmsg->path_msg, buf); + + ret = drm_dp_decode_sideband_req(txmsg, &req); + if (ret) { + drm_printf(p, "<failed to decode sideband req: %d>\n", ret); + return; + } + drm_dp_dump_sideband_msg_req_body(&req, 1, p); + + switch (req.req_type) { + case DP_REMOTE_DPCD_WRITE: + kfree(req.u.dpcd_write.bytes); + break; + case DP_REMOTE_I2C_READ: + for (i = 0; i < req.u.i2c_read.num_transactions; i++) + kfree(req.u.i2c_read.transactions[i].bytes); + break; + case DP_REMOTE_I2C_WRITE: + kfree(req.u.i2c_write.bytes); break; } - raw->cur_len = idx; } static void drm_dp_crc_sideband_chunk_req(u8 *msg, u8 len) @@ -842,11 +1135,11 @@ static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr, clear_bit(vcpi - 1, &mgr->vcpi_mask); for (i = 0; i < mgr->max_payloads; i++) { - if (mgr->proposed_vcpis[i]) - if (mgr->proposed_vcpis[i]->vcpi == vcpi) { - 
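The encode/decode pair above (declared in the new drm_dp_mst_topology_internal.h and exported only for tests) makes it possible to round-trip a sideband request and print it through drm_dp_dump_sideband_msg_req_body(). A hedged sketch of such a check; the wrapper function is hypothetical, only the calls shown in the hunks are assumed:

#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_print.h>
#include "drm_dp_mst_topology_internal.h"

static void example_sideband_roundtrip(struct drm_printer *p)
{
	struct drm_dp_sideband_msg_req_body in = {
		.req_type = DP_ENUM_PATH_RESOURCES,
		.u.port_num.port_number = 1,
	};
	struct drm_dp_sideband_msg_req_body out;
	struct drm_dp_sideband_msg_tx raw = {};

	drm_dp_encode_sideband_req(&in, &raw);

	/* Decoding DPCD/I2C writes allocates, so this can fail with -ENOMEM. */
	if (drm_dp_decode_sideband_req(&raw, &out))
		return;

	drm_dp_dump_sideband_msg_req_body(&out, 0, p);
}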
mgr->proposed_vcpis[i] = NULL; - clear_bit(i + 1, &mgr->payload_mask); - } + if (mgr->proposed_vcpis[i] && + mgr->proposed_vcpis[i]->vcpi == vcpi) { + mgr->proposed_vcpis[i] = NULL; + clear_bit(i + 1, &mgr->payload_mask); + } } mutex_unlock(&mgr->payload_lock); } @@ -899,6 +1192,11 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, } } out: + if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { + struct drm_printer p = drm_debug_printer(DBG_PREFIX); + + drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); + } mutex_unlock(&mgr->qlock); return ret; @@ -1108,39 +1406,194 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port) } EXPORT_SYMBOL(drm_dp_mst_put_port_malloc); -static void drm_dp_destroy_mst_branch_device(struct kref *kref) +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + +#define STACK_DEPTH 8 + +static noinline void +__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_ref_history *history, + enum drm_dp_mst_topology_ref_type type) { - struct drm_dp_mst_branch *mstb = - container_of(kref, struct drm_dp_mst_branch, topology_kref); - struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; - struct drm_dp_mst_port *port, *tmp; - bool wake_tx = false; + struct drm_dp_mst_topology_ref_entry *entry = NULL; + depot_stack_handle_t backtrace; + ulong stack_entries[STACK_DEPTH]; + uint n; + int i; - mutex_lock(&mgr->lock); - list_for_each_entry_safe(port, tmp, &mstb->ports, next) { - list_del(&port->next); - drm_dp_mst_topology_put_port(port); + n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1); + backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL); + if (!backtrace) + return; + + /* Try to find an existing entry for this backtrace */ + for (i = 0; i < history->len; i++) { + if (history->entries[i].backtrace == backtrace) { + entry = &history->entries[i]; + break; + } } - mutex_unlock(&mgr->lock); - /* drop any tx slots msg */ - mutex_lock(&mstb->mgr->qlock); - if (mstb->tx_slots[0]) { - mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; - mstb->tx_slots[0] = NULL; - wake_tx = true; + /* Otherwise add one */ + if (!entry) { + struct drm_dp_mst_topology_ref_entry *new; + int new_len = history->len + 1; + + new = krealloc(history->entries, sizeof(*new) * new_len, + GFP_KERNEL); + if (!new) + return; + + entry = &new[history->len]; + history->len = new_len; + history->entries = new; + + entry->backtrace = backtrace; + entry->type = type; + entry->count = 0; } - if (mstb->tx_slots[1]) { - mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; - mstb->tx_slots[1] = NULL; - wake_tx = true; + entry->count++; + entry->ts_nsec = ktime_get_ns(); +} + +static int +topology_ref_history_cmp(const void *a, const void *b) +{ + const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b; + + if (entry_a->ts_nsec > entry_b->ts_nsec) + return 1; + else if (entry_a->ts_nsec < entry_b->ts_nsec) + return -1; + else + return 0; +} + +static inline const char * +topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type) +{ + if (type == DRM_DP_MST_TOPOLOGY_REF_GET) + return "get"; + else + return "put"; +} + +static void +__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history, + void *ptr, const char *type_str) +{ + struct drm_printer p = drm_debug_printer(DBG_PREFIX); + char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + int i; + + if (!buf) + return; + + if (!history->len) + goto out; + + /* First, sort the list so that it goes from oldest to newest + * reference entry + */ + 
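__topology_ref_save() above implements the CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS history: each topology get/put stores a truncated backtrace in the stack depot, so repeated call sites collapse into a single counted entry. A minimal sketch of just the capture step, assuming STACKDEPOT is available (the helper name is invented):

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t example_save_backtrace(void)
{
	unsigned long entries[8];
	unsigned int n;

	/* Skip one frame so the caller, not this helper, is recorded first. */
	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* Returns 0 on allocation failure; the code above simply bails then. */
	return stack_depot_save(entries, n, GFP_KERNEL);
}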
sort(history->entries, history->len, sizeof(*history->entries), + topology_ref_history_cmp, NULL); + + drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n", + type_str, ptr); + + for (i = 0; i < history->len; i++) { + const struct drm_dp_mst_topology_ref_entry *entry = + &history->entries[i]; + ulong *entries; + uint nr_entries; + u64 ts_nsec = entry->ts_nsec; + u32 rem_nsec = do_div(ts_nsec, 1000000000); + + nr_entries = stack_depot_fetch(entry->backtrace, &entries); + stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4); + + drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s", + entry->count, + topology_ref_type_to_str(entry->type), + ts_nsec, rem_nsec / 1000, buf); } - mutex_unlock(&mstb->mgr->qlock); - if (wake_tx) - wake_up_all(&mstb->mgr->tx_waitq); + /* Now free the history, since this is the only time we expose it */ + kfree(history->entries); +out: + kfree(buf); +} - drm_dp_mst_put_mstb_malloc(mstb); +static __always_inline void +drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) +{ + __dump_topology_ref_history(&mstb->topology_ref_history, mstb, + "MSTB"); +} + +static __always_inline void +drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) +{ + __dump_topology_ref_history(&port->topology_ref_history, port, + "Port"); +} + +static __always_inline void +save_mstb_topology_ref(struct drm_dp_mst_branch *mstb, + enum drm_dp_mst_topology_ref_type type) +{ + __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type); +} + +static __always_inline void +save_port_topology_ref(struct drm_dp_mst_port *port, + enum drm_dp_mst_topology_ref_type type) +{ + __topology_ref_save(port->mgr, &port->topology_ref_history, type); +} + +static inline void +topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->topology_ref_history_lock); +} + +static inline void +topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_unlock(&mgr->topology_ref_history_lock); +} +#else +static inline void +topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {} +static inline void +topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {} +static inline void +drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {} +static inline void +drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {} +#define save_mstb_topology_ref(mstb, type) +#define save_port_topology_ref(port, type) +#endif + +static void drm_dp_destroy_mst_branch_device(struct kref *kref) +{ + struct drm_dp_mst_branch *mstb = + container_of(kref, struct drm_dp_mst_branch, topology_kref); + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + + drm_dp_mst_dump_mstb_topology_history(mstb); + + INIT_LIST_HEAD(&mstb->destroy_next); + + /* + * This can get called under mgr->mutex, so we need to perform the + * actual destruction of the mstb in another worker + */ + mutex_lock(&mgr->delayed_destroy_lock); + list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list); + mutex_unlock(&mgr->delayed_destroy_lock); + schedule_work(&mgr->delayed_destroy_work); } /** @@ -1168,11 +1621,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) static int __must_check drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) { - int ret = kref_get_unless_zero(&mstb->topology_kref); + int ret; - if (ret) - DRM_DEBUG("mstb %p (%d)\n", mstb, - kref_read(&mstb->topology_kref)); + topology_ref_history_lock(mstb->mgr); + ret = kref_get_unless_zero(&mstb->topology_kref); + if (ret) { + 
DRM_DEBUG("mstb %p (%d)\n", + mstb, kref_read(&mstb->topology_kref)); + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET); + } + + topology_ref_history_unlock(mstb->mgr); return ret; } @@ -1193,9 +1652,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) */ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) { + topology_ref_history_lock(mstb->mgr); + + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET); WARN_ON(kref_read(&mstb->topology_kref) == 0); kref_get(&mstb->topology_kref); DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); + + topology_ref_history_unlock(mstb->mgr); } /** @@ -1213,27 +1677,14 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) static void drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) { + topology_ref_history_lock(mstb->mgr); + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1); - kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); -} + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT); -static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) -{ - struct drm_dp_mst_branch *mstb; - - switch (old_pdt) { - case DP_PEER_DEVICE_DP_LEGACY_CONV: - case DP_PEER_DEVICE_SST_SINK: - /* remove i2c over sideband */ - drm_dp_mst_unregister_i2c_bus(&port->aux); - break; - case DP_PEER_DEVICE_MST_BRANCHING: - mstb = port->mstb; - port->mstb = NULL; - drm_dp_mst_topology_put_mstb(mstb); - break; - } + topology_ref_history_unlock(mstb->mgr); + kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); } static void drm_dp_destroy_port(struct kref *kref) @@ -1242,31 +1693,24 @@ static void drm_dp_destroy_port(struct kref *kref) container_of(kref, struct drm_dp_mst_port, topology_kref); struct drm_dp_mst_topology_mgr *mgr = port->mgr; - if (!port->input) { - kfree(port->cached_edid); + drm_dp_mst_dump_port_topology_history(port); - /* - * The only time we don't have a connector - * on an output port is if the connector init - * fails. 
- */ - if (port->connector) { - /* we can't destroy the connector here, as - * we might be holding the mode_config.mutex - * from an EDID retrieval */ - - mutex_lock(&mgr->destroy_connector_lock); - list_add(&port->next, &mgr->destroy_connector_list); - mutex_unlock(&mgr->destroy_connector_lock); - schedule_work(&mgr->destroy_connector_work); - return; - } - /* no need to clean up vcpi - * as if we have no connector we never setup a vcpi */ - drm_dp_port_teardown_pdt(port, port->pdt); - port->pdt = DP_PEER_DEVICE_NONE; + /* There's nothing that needs locking to destroy an input port yet */ + if (port->input) { + drm_dp_mst_put_port_malloc(port); + return; } - drm_dp_mst_put_port_malloc(port); + + kfree(port->cached_edid); + + /* + * we can't destroy the connector here, as we might be holding the + * mode_config.mutex from an EDID retrieval + */ + mutex_lock(&mgr->delayed_destroy_lock); + list_add(&port->next, &mgr->destroy_port_list); + mutex_unlock(&mgr->delayed_destroy_lock); + schedule_work(&mgr->delayed_destroy_work); } /** @@ -1294,12 +1738,17 @@ static void drm_dp_destroy_port(struct kref *kref) static int __must_check drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) { - int ret = kref_get_unless_zero(&port->topology_kref); + int ret; - if (ret) - DRM_DEBUG("port %p (%d)\n", port, - kref_read(&port->topology_kref)); + topology_ref_history_lock(port->mgr); + ret = kref_get_unless_zero(&port->topology_kref); + if (ret) { + DRM_DEBUG("port %p (%d)\n", + port, kref_read(&port->topology_kref)); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); + } + topology_ref_history_unlock(port->mgr); return ret; } @@ -1318,9 +1767,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) */ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) { + topology_ref_history_lock(port->mgr); + WARN_ON(kref_read(&port->topology_kref) == 0); kref_get(&port->topology_kref); DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref)); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); + + topology_ref_history_unlock(port->mgr); } /** @@ -1336,8 +1790,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) */ static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) { + topology_ref_history_lock(port->mgr); + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref) - 1); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT); + + topology_ref_history_unlock(port->mgr); kref_put(&port->topology_kref, drm_dp_destroy_port); } @@ -1454,38 +1913,79 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, return parent_lct + 1; } -/* - * return sends link address for new mstb - */ -static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) +static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt) { - int ret; - u8 rad[6], lct; - bool send_link = false; + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + struct drm_dp_mst_branch *mstb; + u8 rad[8], lct; + int ret = 0; + + if (port->pdt == new_pdt) + return 0; + + /* Teardown the old pdt, if there is one */ + switch (port->pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + /* + * If the new PDT would also have an i2c bus, don't bother + * with reregistering it + */ + if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + new_pdt == DP_PEER_DEVICE_SST_SINK) { + port->pdt = new_pdt; + return 0; + } + + /* remove i2c over sideband */ + drm_dp_mst_unregister_i2c_bus(&port->aux); + break; + case 
DP_PEER_DEVICE_MST_BRANCHING: + mutex_lock(&mgr->lock); + drm_dp_mst_topology_put_mstb(port->mstb); + port->mstb = NULL; + mutex_unlock(&mgr->lock); + break; + } + + port->pdt = new_pdt; switch (port->pdt) { case DP_PEER_DEVICE_DP_LEGACY_CONV: case DP_PEER_DEVICE_SST_SINK: /* add i2c over sideband */ ret = drm_dp_mst_register_i2c_bus(&port->aux); break; + case DP_PEER_DEVICE_MST_BRANCHING: lct = drm_dp_calculate_rad(port, rad); + mstb = drm_dp_add_mst_branch_device(lct, rad); + if (!mstb) { + ret = -ENOMEM; + DRM_ERROR("Failed to create MSTB for port %p", port); + goto out; + } - port->mstb = drm_dp_add_mst_branch_device(lct, rad); - if (port->mstb) { - port->mstb->mgr = port->mgr; - port->mstb->port_parent = port; - /* - * Make sure this port's memory allocation stays - * around until its child MSTB releases it - */ - drm_dp_mst_get_port_malloc(port); + mutex_lock(&mgr->lock); + port->mstb = mstb; + mstb->mgr = port->mgr; + mstb->port_parent = port; - send_link = true; - } + /* + * Make sure this port's memory allocation stays + * around until its child MSTB releases it + */ + drm_dp_mst_get_port_malloc(port); + mutex_unlock(&mgr->lock); + + /* And make sure we send a link address for this */ + ret = 1; break; } - return send_link; + +out: + if (ret < 0) + port->pdt = DP_PEER_DEVICE_NONE; + return ret; } /** @@ -1617,44 +2117,131 @@ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, } EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister); -static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, - struct drm_device *dev, - struct drm_dp_link_addr_reply_port *port_msg) +static void +drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port) +{ + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + char proppath[255]; + int ret; + + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); + port->connector = mgr->cbs->add_connector(mgr, port, proppath); + if (!port->connector) { + ret = -ENOMEM; + goto error; + } + + if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + port->pdt == DP_PEER_DEVICE_SST_SINK) && + port->port_num >= DP_MST_LOGICAL_PORT_0) { + port->cached_edid = drm_get_edid(port->connector, + &port->aux.ddc); + drm_connector_set_tile_property(port->connector); + } + + mgr->cbs->register_connector(port->connector); + return; + +error: + DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret); +} + +/* + * Drop a topology reference, and unlink the port from the in-memory topology + * layout + */ +static void +drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + mutex_lock(&mgr->lock); + list_del(&port->next); + mutex_unlock(&mgr->lock); + drm_dp_mst_topology_put_port(port); +} + +static struct drm_dp_mst_port * +drm_dp_mst_add_port(struct drm_device *dev, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, u8 port_number) +{ + struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL); + + if (!port) + return NULL; + + kref_init(&port->topology_kref); + kref_init(&port->malloc_kref); + port->parent = mstb; + port->port_num = port_number; + port->mgr = mgr; + port->aux.name = "DPMST"; + port->aux.dev = dev->dev; + port->aux.is_remote = true; + + /* + * Make sure the memory allocation for our parent branch stays + * around until our own memory allocation is released + */ + drm_dp_mst_get_mstb_malloc(mstb); + + return port; +} + +static int +drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, + struct 
drm_device *dev, + struct drm_dp_link_addr_reply_port *port_msg) { + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - bool ret; - bool created = false; - int old_pdt = 0; - int old_ddps = 0; + int old_ddps = 0, ret; + u8 new_pdt = DP_PEER_DEVICE_NONE; + bool created = false, send_link_addr = false, changed = false; port = drm_dp_get_port(mstb, port_msg->port_number); if (!port) { - port = kzalloc(sizeof(*port), GFP_KERNEL); + port = drm_dp_mst_add_port(dev, mgr, mstb, + port_msg->port_number); if (!port) - return; - kref_init(&port->topology_kref); - kref_init(&port->malloc_kref); - port->parent = mstb; - port->port_num = port_msg->port_number; - port->mgr = mstb->mgr; - port->aux.name = "DPMST"; - port->aux.dev = dev->dev; - port->aux.is_remote = true; - - /* - * Make sure the memory allocation for our parent branch stays - * around until our own memory allocation is released + return -ENOMEM; + created = true; + changed = true; + } else if (!port->input && port_msg->input_port && port->connector) { + /* Since port->connector can't be changed here, we create a + * new port if input_port changes from 0 to 1 */ - drm_dp_mst_get_mstb_malloc(mstb); - + drm_dp_mst_topology_unlink_port(mgr, port); + drm_dp_mst_topology_put_port(port); + port = drm_dp_mst_add_port(dev, mgr, mstb, + port_msg->port_number); + if (!port) + return -ENOMEM; + changed = true; created = true; - } else { - old_pdt = port->pdt; + } else if (port->input && !port_msg->input_port) { + changed = true; + } else if (port->connector) { + /* We're updating a port that's exposed to userspace, so do it + * under lock + */ + drm_modeset_lock(&mgr->base.lock, NULL); + old_ddps = port->ddps; + changed = port->ddps != port_msg->ddps || + (port->ddps && + (port->ldps != port_msg->legacy_device_plug_status || + port->dpcd_rev != port_msg->dpcd_revision || + port->mcs != port_msg->mcs || + port->pdt != port_msg->peer_device_type || + port->num_sdp_stream_sinks != + port_msg->num_sdp_stream_sinks)); } - port->pdt = port_msg->peer_device_type; port->input = port_msg->input_port; + if (!port->input) + new_pdt = port_msg->peer_device_type; port->mcs = port_msg->mcs; port->ddps = port_msg->ddps; port->ldps = port_msg->legacy_device_plug_status; @@ -1665,77 +2252,104 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb, /* manage mstb port lists with mgr lock - take a reference for this list */ if (created) { - mutex_lock(&mstb->mgr->lock); + mutex_lock(&mgr->lock); drm_dp_mst_topology_get_port(port); list_add(&port->next, &mstb->ports); - mutex_unlock(&mstb->mgr->lock); + mutex_unlock(&mgr->lock); } if (old_ddps != port->ddps) { if (port->ddps) { if (!port->input) { - drm_dp_send_enum_path_resources(mstb->mgr, - mstb, port); + drm_dp_send_enum_path_resources(mgr, mstb, + port); } } else { port->available_pbn = 0; } } - if (old_pdt != port->pdt && !port->input) { - drm_dp_port_teardown_pdt(port, old_pdt); - - ret = drm_dp_port_setup_pdt(port); - if (ret == true) - drm_dp_send_link_address(mstb->mgr, port->mstb); + ret = drm_dp_port_set_pdt(port, new_pdt); + if (ret == 1) { + send_link_addr = true; + } else if (ret < 0) { + DRM_ERROR("Failed to change PDT on port %p: %d\n", + port, ret); + goto fail; } - if (created && !port->input) { - char proppath[255]; - - build_mst_prop_path(mstb, port->port_num, proppath, - sizeof(proppath)); - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, - port, - proppath); - if (!port->connector) { - /* remove it from the port list */ - 
mutex_lock(&mstb->mgr->lock); - list_del(&port->next); - mutex_unlock(&mstb->mgr->lock); - /* drop port list reference */ - drm_dp_mst_topology_put_port(port); - goto out; - } - if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || - port->pdt == DP_PEER_DEVICE_SST_SINK) && - port->port_num >= DP_MST_LOGICAL_PORT_0) { - port->cached_edid = drm_get_edid(port->connector, - &port->aux.ddc); - drm_connector_set_tile_property(port->connector); - } - (*mstb->mgr->cbs->register_connector)(port->connector); + /* + * If this port wasn't just created, then we're reprobing because + * we're coming out of suspend. In this case, always resend the link + * address if there's an MSTB on this port + */ + if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING) + send_link_addr = true; + + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); + else if (!port->input) + drm_dp_mst_port_add_connector(mstb, port); + + if (send_link_addr && port->mstb) { + ret = drm_dp_send_link_address(mgr, port->mstb); + if (ret == 1) /* MSTB below us changed */ + changed = true; + else if (ret < 0) + goto fail_put; } -out: /* put reference to this port */ drm_dp_mst_topology_put_port(port); + return changed; + +fail: + drm_dp_mst_topology_unlink_port(mgr, port); + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); +fail_put: + drm_dp_mst_topology_put_port(port); + return ret; } -static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, - struct drm_dp_connection_status_notify *conn_stat) +static void +drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, + struct drm_dp_connection_status_notify *conn_stat) { + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - int old_pdt; - int old_ddps; - bool dowork = false; + int old_ddps, ret; + u8 new_pdt; + bool dowork = false, create_connector = false; + port = drm_dp_get_port(mstb, conn_stat->port_number); if (!port) return; + if (port->connector) { + if (!port->input && conn_stat->input_port) { + /* + * We can't remove a connector from an already exposed + * port, so just throw the port out and make sure we + * reprobe the link address of it's parent MSTB + */ + drm_dp_mst_topology_unlink_port(mgr, port); + mstb->link_address_sent = false; + dowork = true; + goto out; + } + + /* Locking is only needed if the port's exposed to userspace */ + drm_modeset_lock(&mgr->base.lock, NULL); + } else if (port->input && !conn_stat->input_port) { + create_connector = true; + /* Reprobe link address so we get num_sdp_streams */ + mstb->link_address_sent = false; + dowork = true; + } + old_ddps = port->ddps; - old_pdt = port->pdt; - port->pdt = conn_stat->peer_device_type; + port->input = conn_stat->input_port; port->mcs = conn_stat->message_capability_status; port->ldps = conn_stat->legacy_device_plug_status; port->ddps = conn_stat->displayport_device_plug_status; @@ -1747,17 +2361,27 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb, port->available_pbn = 0; } } - if (old_pdt != port->pdt && !port->input) { - drm_dp_port_teardown_pdt(port, old_pdt); - if (drm_dp_port_setup_pdt(port)) - dowork = true; + new_pdt = port->input ? 
DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; + + ret = drm_dp_port_set_pdt(port, new_pdt); + if (ret == 1) { + dowork = true; + } else if (ret < 0) { + DRM_ERROR("Failed to change PDT for port %p: %d\n", + port, ret); + dowork = false; } + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); + else if (create_connector) + drm_dp_mst_port_add_connector(mstb, port); + +out: drm_dp_mst_topology_put_port(port); if (dowork) queue_work(system_long_wq, &mstb->mgr->work); - } static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, @@ -1800,7 +2424,7 @@ out: static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( struct drm_dp_mst_branch *mstb, - uint8_t *guid) + const uint8_t *guid) { struct drm_dp_mst_branch *found_mstb; struct drm_dp_mst_port *port; @@ -1824,7 +2448,7 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper( static struct drm_dp_mst_branch * drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, - uint8_t *guid) + const uint8_t *guid) { struct drm_dp_mst_branch *mstb; int ret; @@ -1843,41 +2467,62 @@ drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, return mstb; } -static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, +static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { struct drm_dp_mst_port *port; - struct drm_dp_mst_branch *mstb_child; - if (!mstb->link_address_sent) - drm_dp_send_link_address(mgr, mstb); + int ret; + bool changed = false; + + if (!mstb->link_address_sent) { + ret = drm_dp_send_link_address(mgr, mstb); + if (ret == 1) + changed = true; + else if (ret < 0) + return ret; + } list_for_each_entry(port, &mstb->ports, next) { - if (port->input) - continue; + struct drm_dp_mst_branch *mstb_child = NULL; - if (!port->ddps) + if (port->input || !port->ddps) continue; - if (!port->available_pbn) + if (!port->available_pbn) { + drm_modeset_lock(&mgr->base.lock, NULL); drm_dp_send_enum_path_resources(mgr, mstb, port); + drm_modeset_unlock(&mgr->base.lock); + changed = true; + } - if (port->mstb) { + if (port->mstb) mstb_child = drm_dp_mst_topology_get_mstb_validated( mgr, port->mstb); - if (mstb_child) { - drm_dp_check_and_send_link_address(mgr, mstb_child); - drm_dp_mst_topology_put_mstb(mstb_child); - } + + if (mstb_child) { + ret = drm_dp_check_and_send_link_address(mgr, + mstb_child); + drm_dp_mst_topology_put_mstb(mstb_child); + if (ret == 1) + changed = true; + else if (ret < 0) + return ret; } } + + return changed; } static void drm_dp_mst_link_probe_work(struct work_struct *work) { - struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, work); + struct drm_device *dev = mgr->dev; struct drm_dp_mst_branch *mstb; int ret; + mutex_lock(&mgr->probe_lock); + mutex_lock(&mgr->lock); mstb = mgr->mst_primary; if (mstb) { @@ -1886,10 +2531,17 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) mstb = NULL; } mutex_unlock(&mgr->lock); - if (mstb) { - drm_dp_check_and_send_link_address(mgr, mstb); - drm_dp_mst_topology_put_mstb(mstb); + if (!mstb) { + mutex_unlock(&mgr->probe_lock); + return; } + + ret = drm_dp_check_and_send_link_address(mgr, mstb); + drm_dp_mst_topology_put_mstb(mstb); + + mutex_unlock(&mgr->probe_lock); + if (ret) + drm_kms_helper_hotplug_event(dev); } static bool 
drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, @@ -2035,8 +2687,11 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, idx += tosend + 1; ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); - if (ret) { - DRM_DEBUG_KMS("sideband msg failed to send\n"); + if (unlikely(ret) && drm_debug_enabled(DRM_UT_DP)) { + struct drm_printer p = drm_debug_printer(DBG_PREFIX); + + drm_printf(&p, "sideband msg failed to send\n"); + drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); return ret; } @@ -2098,21 +2753,52 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, { mutex_lock(&mgr->qlock); list_add_tail(&txmsg->next, &mgr->tx_msg_downq); + + if (drm_debug_enabled(DRM_UT_DP)) { + struct drm_printer p = drm_debug_printer(DBG_PREFIX); + + drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); + } + if (list_is_singular(&mgr->tx_msg_downq)) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } -static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, +static void +drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply) +{ + struct drm_dp_link_addr_reply_port *port_reply; + int i; + + for (i = 0; i < reply->nports; i++) { + port_reply = &reply->ports[i]; + DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", + i, + port_reply->input_port, + port_reply->peer_device_type, + port_reply->port_number, + port_reply->dpcd_revision, + port_reply->mcs, + port_reply->ddps, + port_reply->legacy_device_plug_status, + port_reply->num_sdp_streams, + port_reply->num_sdp_stream_sinks); + } +} + +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { - int len; struct drm_dp_sideband_msg_tx *txmsg; - int ret; + struct drm_dp_link_address_ack_reply *reply; + struct drm_dp_mst_port *port, *tmp; + int i, len, ret, port_mask = 0; + bool changed = false; txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) - return; + return -ENOMEM; txmsg->dst = mstb; len = build_link_address(txmsg); @@ -2120,48 +2806,67 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, mstb->link_address_sent = true; drm_dp_queue_down_tx(mgr, txmsg); + /* FIXME: Actually do some real error handling here */ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); - if (ret > 0) { - int i; + if (ret <= 0) { + DRM_ERROR("Sending link address failed with %d\n", ret); + goto out; + } + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { + DRM_ERROR("link address NAK received\n"); + ret = -EIO; + goto out; + } - if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { - DRM_DEBUG_KMS("link address nak received\n"); - } else { - DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports); - for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { - DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i, - txmsg->reply.u.link_addr.ports[i].input_port, - txmsg->reply.u.link_addr.ports[i].peer_device_type, - txmsg->reply.u.link_addr.ports[i].port_number, - txmsg->reply.u.link_addr.ports[i].dpcd_revision, - txmsg->reply.u.link_addr.ports[i].mcs, - txmsg->reply.u.link_addr.ports[i].ddps, - txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status, - txmsg->reply.u.link_addr.ports[i].num_sdp_streams, - txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); - } + reply = &txmsg->reply.u.link_addr; + DRM_DEBUG_KMS("link address reply: %d\n", reply->nports); + drm_dp_dump_link_address(reply); - 
drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); + drm_dp_check_mstb_guid(mstb, reply->guid); - for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { - drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); - } - drm_kms_helper_hotplug_event(mgr->dev); - } - } else { - mstb->link_address_sent = false; - DRM_DEBUG_KMS("link address failed %d\n", ret); + for (i = 0; i < reply->nports; i++) { + port_mask |= BIT(reply->ports[i].port_number); + ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev, + &reply->ports[i]); + if (ret == 1) + changed = true; + else if (ret < 0) + goto out; } + /* Prune any ports that are currently a part of mstb in our in-memory + * topology, but were not seen in this link address. Usually this + * means that they were removed while the topology was out of sync, + * e.g. during suspend/resume + */ + mutex_lock(&mgr->lock); + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + if (port_mask & BIT(port->port_num)) + continue; + + DRM_DEBUG_KMS("port %d was not in link address, removing\n", + port->port_num); + list_del(&port->next); + drm_dp_mst_topology_put_port(port); + changed = true; + } + mutex_unlock(&mgr->lock); + +out: + if (ret <= 0) + mstb->link_address_sent = false; kfree(txmsg); + return ret < 0 ? ret : changed; } -static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb, - struct drm_dp_mst_port *port) +static int +drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port) { - int len; + struct drm_dp_enum_path_resources_ack_reply *path_res; struct drm_dp_sideband_msg_tx *txmsg; + int len; int ret; txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); @@ -2175,14 +2880,20 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); if (ret > 0) { + path_res = &txmsg->reply.u.path_resources; + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) { DRM_DEBUG_KMS("enum path resources nak received\n"); } else { - if (port->port_num != txmsg->reply.u.path_resources.port_number) + if (port->port_num != path_res->port_number) DRM_ERROR("got incorrect port in response\n"); - DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number, - txmsg->reply.u.path_resources.avail_payload_bw_number); - port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number; + + DRM_DEBUG_KMS("enum path resources %d: %d %d\n", + path_res->port_number, + path_res->full_payload_bw_number, + path_res->avail_payload_bw_number); + port->available_pbn = + path_res->avail_payload_bw_number; } } @@ -2655,30 +3366,13 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, return 0; } -static bool drm_dp_get_vc_payload_bw(int dp_link_bw, - int dp_link_count, - int *out) +static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count) { - switch (dp_link_bw) { - default: + if (dp_link_bw == 0 || dp_link_count == 0) DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n", dp_link_bw, dp_link_count); - return false; - case DP_LINK_BW_1_62: - *out = 3 * dp_link_count; - break; - case DP_LINK_BW_2_7: - *out = 5 * dp_link_count; - break; - case DP_LINK_BW_5_4: - *out = 10 * dp_link_count; - break; - case DP_LINK_BW_8_1: - *out = 15 * dp_link_count; - break; - } - return true; + return dp_link_bw * dp_link_count / 2; } /** @@ -2710,9 
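The rewritten drm_dp_get_vc_payload_bw() above folds the old per-rate switch into dp_link_bw * dp_link_count / 2. That works because the DPCD link-bandwidth code is the per-lane rate in 0.27 Gbps units, so halving it reproduces the old 3/5/10/15 per-lane factors. A compile-time spot check of that equivalence, assuming the standard DP_LINK_BW_* codes (illustrative only):

#include <linux/build_bug.h>
#include <drm/drm_dp_helper.h>

static void example_vc_payload_bw_check(void)
{
	BUILD_BUG_ON(DP_LINK_BW_1_62 / 2 != 3);		/* old case: 3 * lane count */
	BUILD_BUG_ON(DP_LINK_BW_2_7 / 2 != 5);		/* old case: 5 * lane count */
	BUILD_BUG_ON(DP_LINK_BW_5_4 / 2 != 10);		/* old case: 10 * lane count */
	BUILD_BUG_ON(DP_LINK_BW_8_1 / 2 != 15);		/* old case: 15 * lane count */
}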
+3404,9 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms goto out_unlock; } - if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1], - mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, - &mgr->pbn_div)) { + mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], + mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK); + if (mgr->pbn_div == 0) { ret = -EINVAL; goto out_unlock; } @@ -2767,6 +3461,23 @@ out_unlock: } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); +static void +drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_port *port; + + /* The link address will need to be re-sent on resume */ + mstb->link_address_sent = false; + + list_for_each_entry(port, &mstb->ports, next) { + /* The PBN for each port will also need to be re-probed */ + port->available_pbn = 0; + + if (port->mstb) + drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); + } +} + /** * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager * @mgr: manager to suspend @@ -2780,62 +3491,89 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); + flush_work(&mgr->up_req_work); flush_work(&mgr->work); - flush_work(&mgr->destroy_connector_work); + flush_work(&mgr->delayed_destroy_work); + + mutex_lock(&mgr->lock); + if (mgr->mst_state && mgr->mst_primary) + drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); + mutex_unlock(&mgr->lock); } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); /** * drm_dp_mst_topology_mgr_resume() - resume the MST manager * @mgr: manager to resume + * @sync: whether or not to perform topology reprobing synchronously * * This will fetch DPCD and see if the device is still there, * if it is, it will rewrite the MSTM control bits, and return. * - * if the device fails this returns -1, and the driver should do + * If the device fails this returns -1, and the driver should do * a full MST reprobe, in case we were undocked. + * + * During system resume (where it is assumed that the driver will be calling + * drm_atomic_helper_resume()) this function should be called beforehand with + * @sync set to true. In contexts like runtime resume where the driver is not + * expected to be calling drm_atomic_helper_resume(), this function should be + * called with @sync set to false in order to avoid deadlocking. + * + * Returns: -1 if the MST topology was removed while we were suspended, 0 + * otherwise. 
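The reworked suspend path above invalidates the in-memory topology, and resume re-queues the link-address probe; the new @sync flag documented here decides whether drm_dp_mst_topology_mgr_resume() waits for that re-probe. A hedged sketch of the calling convention from the kernel-doc; struct example_mst_device and its fields are invented, and the error handling is only one plausible reaction to the -1 return:

#include <linux/errno.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_mst_helper.h>

struct example_mst_device {
	struct drm_device *drm;
	struct drm_atomic_state *suspend_state;
	struct drm_dp_mst_topology_mgr mst_mgr;
};

static int example_system_resume(struct example_mst_device *edev)
{
	/* System resume: wait for the topology re-probe before atomic resume. */
	if (drm_dp_mst_topology_mgr_resume(&edev->mst_mgr, true) < 0)
		return -ENODEV;	/* undocked while asleep; a full MST reprobe is needed */

	return drm_atomic_helper_resume(edev->drm, edev->suspend_state);
}

static void example_runtime_resume(struct example_mst_device *edev)
{
	/* Runtime resume: no drm_atomic_helper_resume() follows, so don't block. */
	drm_dp_mst_topology_mgr_resume(&edev->mst_mgr, false);
}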
*/ -int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) +int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, + bool sync) { - int ret = 0; + int ret; + u8 guid[16]; mutex_lock(&mgr->lock); + if (!mgr->mst_primary) + goto out_fail; - if (mgr->mst_primary) { - int sret; - u8 guid[16]; + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, + DP_RECEIVER_CAP_SIZE); + if (ret != DP_RECEIVER_CAP_SIZE) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } - sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); - if (sret != DP_RECEIVER_CAP_SIZE) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); + if (ret < 0) { + DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); + goto out_fail; + } - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); - if (ret < 0) { - DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } + /* Some hubs forget their guids after they resume */ + ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); + if (ret != 16) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } + drm_dp_check_mstb_guid(mgr->mst_primary, guid); - /* Some hubs forget their guids after they resume */ - sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); - if (sret != 16) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } - drm_dp_check_mstb_guid(mgr->mst_primary, guid); + /* + * For the final step of resuming the topology, we need to bring the + * state of our in-memory topology back into sync with reality. 
So, + * restart the probing process as if we're probing a new hub + */ + queue_work(system_long_wq, &mgr->work); + mutex_unlock(&mgr->lock); - ret = 0; - } else - ret = -1; + if (sync) { + DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n"); + flush_work(&mgr->work); + } -out_unlock: + return 0; + +out_fail: mutex_unlock(&mgr->lock); - return ret; + return -1; } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); @@ -2890,136 +3628,198 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) { - int ret = 0; + struct drm_dp_sideband_msg_tx *txmsg; + struct drm_dp_mst_branch *mstb; + struct drm_dp_sideband_msg_hdr *hdr = &mgr->down_rep_recv.initial_hdr; + int slot = -1; + + if (!drm_dp_get_one_sb_msg(mgr, false)) + goto clear_down_rep_recv; - if (!drm_dp_get_one_sb_msg(mgr, false)) { - memset(&mgr->down_rep_recv, 0, - sizeof(struct drm_dp_sideband_msg_rx)); + if (!mgr->down_rep_recv.have_eomt) return 0; + + mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", + hdr->lct); + goto clear_down_rep_recv; + } + + /* find the message */ + slot = hdr->seqno; + mutex_lock(&mgr->qlock); + txmsg = mstb->tx_slots[slot]; + /* remove from slots */ + mutex_unlock(&mgr->qlock); + + if (!txmsg) { + DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", + mstb, hdr->seqno, hdr->lct, hdr->rad[0], + mgr->down_rep_recv.msg[0]); + goto no_msg; } - if (mgr->down_rep_recv.have_eomt) { - struct drm_dp_sideband_msg_tx *txmsg; - struct drm_dp_mst_branch *mstb; - int slot = -1; - mstb = drm_dp_get_mst_branch_device(mgr, - mgr->down_rep_recv.initial_hdr.lct, - mgr->down_rep_recv.initial_hdr.rad); + drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct); - memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); - return 0; - } + if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) + DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n", + txmsg->reply.req_type, + drm_dp_mst_req_type_str(txmsg->reply.req_type), + txmsg->reply.u.nak.reason, + drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), + txmsg->reply.u.nak.nak_data); - /* find the message */ - slot = mgr->down_rep_recv.initial_hdr.seqno; - mutex_lock(&mgr->qlock); - txmsg = mstb->tx_slots[slot]; - /* remove from slots */ - mutex_unlock(&mgr->qlock); - - if (!txmsg) { - DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", - mstb, - mgr->down_rep_recv.initial_hdr.seqno, - mgr->down_rep_recv.initial_hdr.lct, - mgr->down_rep_recv.initial_hdr.rad[0], - mgr->down_rep_recv.msg[0]); - drm_dp_mst_topology_put_mstb(mstb); - memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); - return 0; - } + memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + drm_dp_mst_topology_put_mstb(mstb); - drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); + mutex_lock(&mgr->qlock); + txmsg->state = DRM_DP_SIDEBAND_TX_RX; + mstb->tx_slots[slot] = NULL; + mutex_unlock(&mgr->qlock); - if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) - DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n", - txmsg->reply.req_type, - drm_dp_mst_req_type_str(txmsg->reply.req_type), - txmsg->reply.u.nak.reason, - 
drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason), - txmsg->reply.u.nak.nak_data); - - memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); - drm_dp_mst_topology_put_mstb(mstb); + wake_up_all(&mgr->tx_waitq); + + return 0; - mutex_lock(&mgr->qlock); - txmsg->state = DRM_DP_SIDEBAND_TX_RX; - mstb->tx_slots[slot] = NULL; - mutex_unlock(&mgr->qlock); +no_msg: + drm_dp_mst_topology_put_mstb(mstb); +clear_down_rep_recv: + memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); - wake_up_all(&mgr->tx_waitq); - } - return ret; + return 0; } -static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) +static inline bool +drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_pending_up_req *up_req) { - int ret = 0; + struct drm_dp_mst_branch *mstb = NULL; + struct drm_dp_sideband_msg_req_body *msg = &up_req->msg; + struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr; + bool hotplug = false; - if (!drm_dp_get_one_sb_msg(mgr, true)) { - memset(&mgr->up_req_recv, 0, - sizeof(struct drm_dp_sideband_msg_rx)); - return 0; + if (hdr->broadcast) { + const u8 *guid = NULL; + + if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) + guid = msg->u.conn_stat.guid; + else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) + guid = msg->u.resource_stat.guid; + + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); + } else { + mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); } - if (mgr->up_req_recv.have_eomt) { - struct drm_dp_sideband_msg_req_body msg; - struct drm_dp_mst_branch *mstb = NULL; - bool seqno; + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", + hdr->lct); + return false; + } - if (!mgr->up_req_recv.initial_hdr.broadcast) { - mstb = drm_dp_get_mst_branch_device(mgr, - mgr->up_req_recv.initial_hdr.lct, - mgr->up_req_recv.initial_hdr.rad); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); - memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); - return 0; - } - } + /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */ + if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { + drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); + hotplug = true; + } - seqno = mgr->up_req_recv.initial_hdr.seqno; - drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); + drm_dp_mst_topology_put_mstb(mstb); + return hotplug; +} - if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { - drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); +static void drm_dp_mst_up_req_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, + up_req_work); + struct drm_dp_pending_up_req *up_req; + bool send_hotplug = false; - if (!mstb) - mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid); + mutex_lock(&mgr->probe_lock); + while (true) { + mutex_lock(&mgr->up_req_lock); + up_req = list_first_entry_or_null(&mgr->up_req_list, + struct drm_dp_pending_up_req, + next); + if (up_req) + list_del(&up_req->next); + mutex_unlock(&mgr->up_req_lock); + + if (!up_req) + break; - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); - memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); - return 0; - } + send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req); + kfree(up_req); + } + mutex_unlock(&mgr->probe_lock); - drm_dp_update_port(mstb, &msg.u.conn_stat); + if (send_hotplug) + 
drm_kms_helper_hotplug_event(mgr->dev); +} - DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type); - drm_kms_helper_hotplug_event(mgr->dev); +static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr; + struct drm_dp_pending_up_req *up_req; + bool seqno; - } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { - drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); - if (!mstb) - mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid); + if (!drm_dp_get_one_sb_msg(mgr, true)) + goto out; - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); - memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); - return 0; - } + if (!mgr->up_req_recv.have_eomt) + return 0; - DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); - } + up_req = kzalloc(sizeof(*up_req), GFP_KERNEL); + if (!up_req) { + DRM_ERROR("Not enough memory to process MST up req\n"); + return -ENOMEM; + } + INIT_LIST_HEAD(&up_req->next); - if (mstb) - drm_dp_mst_topology_put_mstb(mstb); + seqno = hdr->seqno; + drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg); - memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && + up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) { + DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n", + up_req->msg.req_type); + kfree(up_req); + goto out; } - return ret; + + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, + seqno, false); + + if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { + const struct drm_dp_connection_status_notify *conn_stat = + &up_req->msg.u.conn_stat; + + DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", + conn_stat->port_number, + conn_stat->legacy_device_plug_status, + conn_stat->displayport_device_plug_status, + conn_stat->message_capability_status, + conn_stat->input_port, + conn_stat->peer_device_type); + } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { + const struct drm_dp_resource_status_notify *res_stat = + &up_req->msg.u.resource_stat; + + DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", + res_stat->port_number, + res_stat->available_pbn); + } + + up_req->hdr = *hdr; + mutex_lock(&mgr->up_req_lock); + list_add_tail(&up_req->next, &mgr->up_req_list); + mutex_unlock(&mgr->up_req_lock); + queue_work(system_long_wq, &mgr->up_req_work); + +out: + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); + return 0; } /** @@ -3063,22 +3863,31 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq); /** * drm_dp_mst_detect_port() - get connection status for an MST port * @connector: DRM connector for this port + * @ctx: The acquisition context to use for grabbing locks * @mgr: manager for this port - * @port: unverified pointer to a port + * @port: pointer to a port * - * This returns the current connection state for a port. It validates the - * port pointer still exists so the caller doesn't require a reference + * This returns the current connection state for a port. 
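The reworked drm_dp_mst_detect_port() documented here takes a modeset acquisition context and returns an int, so a caller must be prepared for a negative error (e.g. -EDEADLK propagated from drm_modeset_lock()) in addition to the connector_status values. A minimal, hypothetical &drm_connector_helper_funcs.detect_ctx hook; my_connector and its mst_mgr/mst_port fields are assumptions, not part of this patch:

static int my_mst_connector_detect_ctx(struct drm_connector *connector,
				       struct drm_modeset_acquire_ctx *ctx,
				       bool force)
{
	struct my_connector *mconn = to_my_connector(connector); /* hypothetical wrapper */

	/* May return connector_status_* or a negative error such as -EDEADLK. */
	return drm_dp_mst_detect_port(connector, ctx, mconn->mst_mgr,
				      mconn->mst_port);
}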
*/ -enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, - struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +int +drm_dp_mst_detect_port(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) { - enum drm_connector_status status = connector_status_disconnected; + int ret; /* we need to search for the port in the mgr in case it's gone */ port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return connector_status_disconnected; + ret = drm_modeset_lock(&mgr->base.lock, ctx); + if (ret) + goto out; + + ret = connector_status_disconnected; + if (!port->ddps) goto out; @@ -3088,7 +3897,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector break; case DP_PEER_DEVICE_SST_SINK: - status = connector_status_connected; + ret = connector_status_connected; /* for logical ports - cache the EDID */ if (port->port_num >= 8 && !port->cached_edid) { port->cached_edid = drm_get_edid(connector, &port->aux.ddc); @@ -3096,12 +3905,12 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector break; case DP_PEER_DEVICE_DP_LEGACY_CONV: if (port->ldps) - status = connector_status_connected; + ret = connector_status_connected; break; } out: drm_dp_mst_topology_put_port(port); - return status; + return ret; } EXPORT_SYMBOL(drm_dp_mst_detect_port); @@ -3237,7 +4046,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, { struct drm_dp_mst_topology_state *topology_state; struct drm_dp_vcpi_allocation *pos, *vcpi = NULL; - int prev_slots, req_slots, ret; + int prev_slots, req_slots; topology_state = drm_atomic_get_mst_topology_state(state, mgr); if (IS_ERR(topology_state)) @@ -3284,8 +4093,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, } vcpi->vcpi = req_slots; - ret = req_slots; - return ret; + return req_slots; } EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots); @@ -3539,13 +4347,6 @@ EXPORT_SYMBOL(drm_dp_check_act_status); */ int drm_dp_calc_pbn_mode(int clock, int bpp) { - u64 kbps; - s64 peak_kbps; - u32 numerator; - u32 denominator; - - kbps = clock * bpp; - /* * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 * The unit of 54/64Mbytes/sec is an arbitrary unit chosen based on @@ -3556,41 +4357,11 @@ int drm_dp_calc_pbn_mode(int clock, int bpp) * peak_kbps *= (64/54) * peak_kbps *= 8 convert to bytes */ - - numerator = 64 * 1006; - denominator = 54 * 8 * 1000 * 1000; - - kbps *= numerator; - peak_kbps = drm_fixp_from_fraction(kbps, denominator); - - return drm_fixp2int_ceil(peak_kbps); + return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006), + 8 * 54 * 1000 * 1000); } EXPORT_SYMBOL(drm_dp_calc_pbn_mode); -static int test_calc_pbn_mode(void) -{ - int ret; - ret = drm_dp_calc_pbn_mode(154000, 30); - if (ret != 689) { - DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", - 154000, 30, 689, ret); - return -EINVAL; - } - ret = drm_dp_calc_pbn_mode(234000, 30); - if (ret != 1047) { - DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", - 234000, 30, 1047, ret); - return -EINVAL; - } - ret = drm_dp_calc_pbn_mode(297000, 24); - if (ret != 1063) { - DRM_ERROR("PBN calculation test failed - clock %d, bpp %d, expected PBN %d, actual PBN %d.\n", - 297000, 24, 1063, ret); - return -EINVAL; - } - return 0; -} - /* we want to kick the TX after we've ack the up/down IRQs. 
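The expected values from the deleted test_calc_pbn_mode() self-test still make a handy sanity check for the simplified drm_dp_calc_pbn_mode() above; working through its first case:

	PBN = DIV_ROUND_UP(clock * bpp * 64 * 1006, 8 * 54 * 1000 * 1000)
	    = DIV_ROUND_UP(154000 * 30 * 64384, 432000000)
	    = DIV_ROUND_UP(297454080000, 432000000)
	    = 689

which matches the old expected result for 154000 kHz at 30 bpp; the remaining cases (234000 kHz / 30 bpp -> 1047 and 297000 kHz / 24 bpp -> 1063) work out the same way, so the DIV_ROUND_UP_ULL/mul_u32_u32 rewrite reproduces the fixed-point results for every case the removed self-test covered.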
*/ static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr) { @@ -3729,36 +4500,103 @@ static void drm_dp_tx_work(struct work_struct *work) mutex_unlock(&mgr->qlock); } -static void drm_dp_destroy_connector_work(struct work_struct *work) +static inline void +drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) { - struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); - struct drm_dp_mst_port *port; - bool send_hotplug = false; + if (port->connector) + port->mgr->cbs->destroy_connector(port->mgr, port->connector); + + drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE); + drm_dp_mst_put_port_malloc(port); +} + +static inline void +drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + struct drm_dp_mst_port *port, *tmp; + bool wake_tx = false; + + mutex_lock(&mgr->lock); + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + list_del(&port->next); + drm_dp_mst_topology_put_port(port); + } + mutex_unlock(&mgr->lock); + + /* drop any tx slots msg */ + mutex_lock(&mstb->mgr->qlock); + if (mstb->tx_slots[0]) { + mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[0] = NULL; + wake_tx = true; + } + if (mstb->tx_slots[1]) { + mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[1] = NULL; + wake_tx = true; + } + mutex_unlock(&mstb->mgr->qlock); + + if (wake_tx) + wake_up_all(&mstb->mgr->tx_waitq); + + drm_dp_mst_put_mstb_malloc(mstb); +} + +static void drm_dp_delayed_destroy_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, + delayed_destroy_work); + bool send_hotplug = false, go_again; + /* * Not a regular list traverse as we have to drop the destroy - * connector lock before destroying the connector, to avoid AB->BA + * connector lock before destroying the mstb/port, to avoid AB->BA * ordering between this lock and the config mutex. 
*/ - for (;;) { - mutex_lock(&mgr->destroy_connector_lock); - port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next); - if (!port) { - mutex_unlock(&mgr->destroy_connector_lock); - break; + do { + go_again = false; + + for (;;) { + struct drm_dp_mst_branch *mstb; + + mutex_lock(&mgr->delayed_destroy_lock); + mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list, + struct drm_dp_mst_branch, + destroy_next); + if (mstb) + list_del(&mstb->destroy_next); + mutex_unlock(&mgr->delayed_destroy_lock); + + if (!mstb) + break; + + drm_dp_delayed_destroy_mstb(mstb); + go_again = true; } - list_del(&port->next); - mutex_unlock(&mgr->destroy_connector_lock); - INIT_LIST_HEAD(&port->next); + for (;;) { + struct drm_dp_mst_port *port; - mgr->cbs->destroy_connector(mgr, port->connector); + mutex_lock(&mgr->delayed_destroy_lock); + port = list_first_entry_or_null(&mgr->destroy_port_list, + struct drm_dp_mst_port, + next); + if (port) + list_del(&port->next); + mutex_unlock(&mgr->delayed_destroy_lock); - drm_dp_port_teardown_pdt(port, port->pdt); - port->pdt = DP_PEER_DEVICE_NONE; + if (!port) + break; + + drm_dp_delayed_destroy_port(port); + send_hotplug = true; + go_again = true; + } + } while (go_again); - drm_dp_mst_put_port_malloc(port); - send_hotplug = true; - } if (send_hotplug) drm_kms_helper_hotplug_event(mgr->dev); } @@ -3920,9 +4758,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs); struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr) { - struct drm_device *dev = mgr->dev; - - WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex)); return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base)); } EXPORT_SYMBOL(drm_atomic_get_mst_topology_state); @@ -3948,12 +4783,20 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, mutex_init(&mgr->lock); mutex_init(&mgr->qlock); mutex_init(&mgr->payload_lock); - mutex_init(&mgr->destroy_connector_lock); + mutex_init(&mgr->delayed_destroy_lock); + mutex_init(&mgr->up_req_lock); + mutex_init(&mgr->probe_lock); +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + mutex_init(&mgr->topology_ref_history_lock); +#endif INIT_LIST_HEAD(&mgr->tx_msg_downq); - INIT_LIST_HEAD(&mgr->destroy_connector_list); + INIT_LIST_HEAD(&mgr->destroy_port_list); + INIT_LIST_HEAD(&mgr->destroy_branch_device_list); + INIT_LIST_HEAD(&mgr->up_req_list); INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); INIT_WORK(&mgr->tx_work, drm_dp_tx_work); - INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work); + INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work); + INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work); init_waitqueue_head(&mgr->tx_waitq); mgr->dev = dev; mgr->aux = aux; @@ -3970,8 +4813,6 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, if (!mgr->proposed_vcpis) return -ENOMEM; set_bit(0, &mgr->payload_mask); - if (test_calc_pbn_mode() < 0) - DRM_ERROR("MST PBN self-test failed\n"); mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL); if (mst_state == NULL) @@ -3996,7 +4837,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) { drm_dp_mst_topology_mgr_set_mst(mgr, false); flush_work(&mgr->work); - flush_work(&mgr->destroy_connector_work); + cancel_work_sync(&mgr->delayed_destroy_work); mutex_lock(&mgr->payload_lock); kfree(mgr->payloads); mgr->payloads = NULL; @@ -4007,6 +4848,16 @@ void 
drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) mgr->aux = NULL; drm_atomic_private_obj_fini(&mgr->base); mgr->funcs = NULL; + + mutex_destroy(&mgr->delayed_destroy_lock); + mutex_destroy(&mgr->payload_lock); + mutex_destroy(&mgr->qlock); + mutex_destroy(&mgr->lock); + mutex_destroy(&mgr->up_req_lock); + mutex_destroy(&mgr->probe_lock); +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + mutex_destroy(&mgr->topology_ref_history_lock); +#endif } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy); diff --git a/drivers/gpu/drm/drm_dp_mst_topology_internal.h b/drivers/gpu/drm/drm_dp_mst_topology_internal.h new file mode 100644 index 000000000000..eeda9a61c657 --- /dev/null +++ b/drivers/gpu/drm/drm_dp_mst_topology_internal.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-only + * + * Declarations for DP MST related functions which are only used in selftests + * + * Copyright © 2018 Red Hat + * Authors: + * Lyude Paul <lyude@redhat.com> + */ + +#ifndef _DRM_DP_MST_HELPER_INTERNAL_H_ +#define _DRM_DP_MST_HELPER_INTERNAL_H_ + +#include <drm/drm_dp_mst_helper.h> + +void +drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req, + struct drm_dp_sideband_msg_tx *raw); +int drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw, + struct drm_dp_sideband_msg_req_body *req); +void +drm_dp_dump_sideband_msg_req_body(const struct drm_dp_sideband_msg_req_body *req, + int indent, struct drm_printer *printer); + +#endif /* !_DRM_DP_MST_HELPER_INTERNAL_H_ */ diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 769feefeeeef..1b9b40a1c7c9 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -46,26 +46,9 @@ #include "drm_internal.h" #include "drm_legacy.h" -/* - * drm_debug: Enable debug output. - * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details. 
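The drm_drv.c hunk beginning here drops the drm_debug bitmask definition and its module-parameter description from this file (the knob presumably lives on elsewhere after this series); call sites in drm_edid.c and drm_edid_load.c further down switch to the drm_debug_enabled() accessor instead of testing the variable directly. A minimal before/after sketch, assuming the accessor keeps the existing DRM_UT_* categories:

	/* old style, poking the exported variable: */
	if (drm_debug & DRM_UT_KMS)
		DRM_DEBUG_KMS("EDID block is invalid\n");

	/* new style used by the converted call sites in this diff: */
	if (drm_debug_enabled(DRM_UT_KMS))
		DRM_DEBUG_KMS("EDID block is invalid\n");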
- */ -unsigned int drm_debug = 0; -EXPORT_SYMBOL(drm_debug); - MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl"); MODULE_DESCRIPTION("DRM shared core routines"); MODULE_LICENSE("GPL and additional rights"); -MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n" -"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n" -"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n" -"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n" -"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n" -"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n" -"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n" -"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n" -"\t\tBit 8 (0x100) will enable DP messages (displayport code)"); -module_param_named(debug, drm_debug, int, 0600); static DEFINE_SPINLOCK(drm_minor_lock); static struct idr drm_minors_idr; diff --git a/drivers/gpu/drm/drm_dsc.c b/drivers/gpu/drm/drm_dsc.c index 77f4e5ae4197..4a475d9696ff 100644 --- a/drivers/gpu/drm/drm_dsc.c +++ b/drivers/gpu/drm/drm_dsc.c @@ -216,13 +216,11 @@ void drm_dsc_pps_payload_pack(struct drm_dsc_picture_parameter_set *pps_payload, */ for (i = 0; i < DSC_NUM_BUF_RANGES; i++) { pps_payload->rc_range_parameters[i] = - ((dsc_cfg->rc_range_params[i].range_min_qp << - DSC_PPS_RC_RANGE_MINQP_SHIFT) | - (dsc_cfg->rc_range_params[i].range_max_qp << - DSC_PPS_RC_RANGE_MAXQP_SHIFT) | - (dsc_cfg->rc_range_params[i].range_bpg_offset)); - pps_payload->rc_range_parameters[i] = - cpu_to_be16(pps_payload->rc_range_parameters[i]); + cpu_to_be16((dsc_cfg->rc_range_params[i].range_min_qp << + DSC_PPS_RC_RANGE_MINQP_SHIFT) | + (dsc_cfg->rc_range_params[i].range_max_qp << + DSC_PPS_RC_RANGE_MAXQP_SHIFT) | + (dsc_cfg->rc_range_params[i].range_bpg_offset)); } /* PPS 88 */ @@ -336,12 +334,6 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) else vdsc_cfg->nfl_bpg_offset = 0; - /* 2^16 - 1 */ - if (vdsc_cfg->nfl_bpg_offset > 65535) { - DRM_DEBUG_KMS("NflBpgOffset is too large for this slice height\n"); - return -ERANGE; - } - /* Number of groups used to code the entire slice */ groups_total = groups_per_line * vdsc_cfg->slice_height; @@ -371,11 +363,6 @@ int drm_dsc_compute_rc_parameters(struct drm_dsc_config *vdsc_cfg) vdsc_cfg->scale_increment_interval = 0; } - if (vdsc_cfg->scale_increment_interval > 65535) { - DRM_DEBUG_KMS("ScaleIncrementInterval is large for slice height\n"); - return -ERANGE; - } - /* * DSC spec mentions that bits_per_pixel specifies the target * bits/pixel (bpp) rate that is used by the encoder, diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 6b0177112e18..474ac04d5600 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1278,6 +1278,106 @@ static const struct drm_display_mode edid_cea_modes[] = { 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 108 - 1280x720@48Hz 16:9 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240, + 2280, 2500, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 109 - 1280x720@48Hz 64:27 */ + { DRM_MODE("1280x720", DRM_MODE_TYPE_DRIVER, 90000, 1280, 2240, + 2280, 2500, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 48, .picture_aspect_ratio = 
HDMI_PICTURE_ASPECT_64_27, }, + /* 110 - 1680x720@48Hz 64:27 */ + { DRM_MODE("1680x720", DRM_MODE_TYPE_DRIVER, 99000, 1680, 2490, + 2530, 2750, 0, 720, 725, 730, 750, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 111 - 1920x1080@48Hz 16:9 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558, + 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 112 - 1920x1080@48Hz 64:27 */ + { DRM_MODE("1920x1080", DRM_MODE_TYPE_DRIVER, 148500, 1920, 2558, + 2602, 2750, 0, 1080, 1084, 1089, 1125, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 113 - 2560x1080@48Hz 64:27 */ + { DRM_MODE("2560x1080", DRM_MODE_TYPE_DRIVER, 198000, 2560, 3558, + 3602, 3750, 0, 1080, 1084, 1089, 1100, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 114 - 3840x2160@48Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116, + 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 115 - 4096x2160@48Hz 256:135 */ + { DRM_MODE("4096x2160", DRM_MODE_TYPE_DRIVER, 594000, 4096, 5116, + 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_256_135, }, + /* 116 - 3840x2160@48Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 594000, 3840, 5116, + 5204, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 117 - 3840x2160@100Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 118 - 3840x2160@120Hz 16:9 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_16_9, }, + /* 119 - 3840x2160@100Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4896, + 4984, 5280, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 120 - 3840x2160@120Hz 64:27 */ + { DRM_MODE("3840x2160", DRM_MODE_TYPE_DRIVER, 1188000, 3840, 4016, + 4104, 4400, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 120, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 121 - 5120x2160@24Hz 64:27 */ + { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 7116, + 7204, 7500, 0, 2160, 2168, 2178, 2200, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 24, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 122 - 5120x2160@25Hz 64:27 */ + { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 6816, + 6904, 7200, 0, 2160, 2168, 2178, 2200, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 25, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 123 - 5120x2160@30Hz 64:27 */ + { 
DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 396000, 5120, 5784, + 5872, 6000, 0, 2160, 2168, 2178, 2200, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 30, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 124 - 5120x2160@48Hz 64:27 */ + { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5866, + 5954, 6250, 0, 2160, 2168, 2178, 2475, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 48, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 125 - 5120x2160@50Hz 64:27 */ + { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 6216, + 6304, 6600, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 50, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 126 - 5120x2160@60Hz 64:27 */ + { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 742500, 5120, 5284, + 5372, 5500, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 60, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, + /* 127 - 5120x2160@100Hz 64:27 */ + { DRM_MODE("5120x2160", DRM_MODE_TYPE_DRIVER, 1485000, 5120, 6216, + 6304, 6600, 0, 2160, 2168, 2178, 2250, 0, + DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC), + .vrefresh = 100, .picture_aspect_ratio = HDMI_PICTURE_ASPECT_64_27, }, }; /* @@ -1554,7 +1654,7 @@ static void connector_bad_edid(struct drm_connector *connector, { int i; - if (connector->bad_edid_counter++ && !(drm_debug & DRM_UT_KMS)) + if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS)) return; dev_warn(connector->dev->dev, @@ -2092,7 +2192,8 @@ static int standard_timing_level(struct edid *edid) return LEVEL_CVT; if (drm_gtf2_hbreak(edid)) return LEVEL_GTF2; - return LEVEL_GTF; + if (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF) + return LEVEL_GTF; } return LEVEL_DMT; } @@ -3108,18 +3209,10 @@ static bool drm_valid_cea_vic(u8 vic) return vic > 0 && vic < ARRAY_SIZE(edid_cea_modes); } -/** - * drm_get_cea_aspect_ratio - get the picture aspect ratio corresponding to - * the input VIC from the CEA mode list - * @video_code: ID given to each of the CEA modes - * - * Returns picture aspect ratio - */ -enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code) +static enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code) { return edid_cea_modes[video_code].picture_aspect_ratio; } -EXPORT_SYMBOL(drm_get_cea_aspect_ratio); /* * Calculate the alternate clock for HDMI modes (those from the HDMI vendor @@ -3722,7 +3815,7 @@ cea_db_offsets(const u8 *cea, int *start, int *end) if (*end < 4 || *end > 127) return -ERANGE; } else { - return -ENOTSUPP; + return -EOPNOTSUPP; } return 0; @@ -4191,7 +4284,7 @@ int drm_edid_to_sad(struct edid *edid, struct cea_sad **sads) if (cea_revision(cea) < 3) { DRM_DEBUG_KMS("SAD: wrong CEA revision\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } if (cea_db_offsets(cea, &start, &end)) { @@ -4252,7 +4345,7 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb) if (cea_revision(cea) < 3) { DRM_DEBUG_KMS("SAD: wrong CEA revision\n"); - return -ENOTSUPP; + return -EOPNOTSUPP; } if (cea_db_offsets(cea, &start, &end)) { @@ -5071,6 +5164,49 @@ drm_hdmi_infoframe_set_hdr_metadata(struct hdmi_drm_infoframe *frame, } EXPORT_SYMBOL(drm_hdmi_infoframe_set_hdr_metadata); +static u8 drm_mode_hdmi_vic(struct drm_connector *connector, + const struct drm_display_mode *mode) +{ + bool has_hdmi_infoframe = connector ? 
+ connector->display_info.has_hdmi_infoframe : false; + + if (!has_hdmi_infoframe) + return 0; + + /* No HDMI VIC when signalling 3D video format */ + if (mode->flags & DRM_MODE_FLAG_3D_MASK) + return 0; + + return drm_match_hdmi_mode(mode); +} + +static u8 drm_mode_cea_vic(struct drm_connector *connector, + const struct drm_display_mode *mode) +{ + u8 vic; + + /* + * HDMI spec says if a mode is found in HDMI 1.4b 4K modes + * we should send its VIC in vendor infoframes, else send the + * VIC in AVI infoframes. Lets check if this mode is present in + * HDMI 1.4b 4K modes + */ + if (drm_mode_hdmi_vic(connector, mode)) + return 0; + + vic = drm_match_cea_mode(mode); + + /* + * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but + * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we + * have to make sure we dont break HDMI 1.4 sinks. + */ + if (!is_hdmi2_sink(connector) && vic > 64) + return 0; + + return vic; +} + /** * drm_hdmi_avi_infoframe_from_display_mode() - fill an HDMI AVI infoframe with * data from a DRM display mode @@ -5098,29 +5234,7 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame, if (mode->flags & DRM_MODE_FLAG_DBLCLK) frame->pixel_repeat = 1; - frame->video_code = drm_match_cea_mode(mode); - - /* - * HDMI 1.4 VIC range: 1 <= VIC <= 64 (CEA-861-D) but - * HDMI 2.0 VIC range: 1 <= VIC <= 107 (CEA-861-F). So we - * have to make sure we dont break HDMI 1.4 sinks. - */ - if (!is_hdmi2_sink(connector) && frame->video_code > 64) - frame->video_code = 0; - - /* - * HDMI spec says if a mode is found in HDMI 1.4b 4K modes - * we should send its VIC in vendor infoframes, else send the - * VIC in AVI infoframes. Lets check if this mode is present in - * HDMI 1.4b 4K modes - */ - if (frame->video_code) { - u8 vendor_if_vic = drm_match_hdmi_mode(mode); - bool is_s3d = mode->flags & DRM_MODE_FLAG_3D_MASK; - - if (drm_valid_hdmi_vic(vendor_if_vic) && !is_s3d) - frame->video_code = 0; - } + frame->video_code = drm_mode_cea_vic(connector, mode); frame->picture_aspect = HDMI_PICTURE_ASPECT_NONE; @@ -5285,6 +5399,23 @@ drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame, } EXPORT_SYMBOL(drm_hdmi_avi_infoframe_quant_range); +/** + * drm_hdmi_avi_infoframe_bars() - fill the HDMI AVI infoframe + * bar information + * @frame: HDMI AVI infoframe + * @conn_state: connector state + */ +void +drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame, + const struct drm_connector_state *conn_state) +{ + frame->right_bar = conn_state->tv.margins.right; + frame->left_bar = conn_state->tv.margins.left; + frame->top_bar = conn_state->tv.margins.top; + frame->bottom_bar = conn_state->tv.margins.bottom; +} +EXPORT_SYMBOL(drm_hdmi_avi_infoframe_bars); + static enum hdmi_3d_structure s3d_structure_from_display_mode(const struct drm_display_mode *mode) { @@ -5337,8 +5468,6 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, bool has_hdmi_infoframe = connector ? 
connector->display_info.has_hdmi_infoframe : false; int err; - u32 s3d_flags; - u8 vic; if (!frame || !mode) return -EINVAL; @@ -5346,8 +5475,9 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, if (!has_hdmi_infoframe) return -EINVAL; - vic = drm_match_hdmi_mode(mode); - s3d_flags = mode->flags & DRM_MODE_FLAG_3D_MASK; + err = hdmi_vendor_infoframe_init(frame); + if (err < 0) + return err; /* * Even if it's not absolutely necessary to send the infoframe @@ -5358,15 +5488,7 @@ drm_hdmi_vendor_infoframe_from_display_mode(struct hdmi_vendor_infoframe *frame, * mode if the source simply stops sending the infoframe when * it wants to switch from 3D to 2D. */ - - if (vic && s3d_flags) - return -EINVAL; - - err = hdmi_vendor_infoframe_init(frame); - if (err < 0) - return err; - - frame->vic = vic; + frame->vic = drm_mode_hdmi_vic(connector, mode); frame->s3d_struct = s3d_structure_from_display_mode(mode); return 0; diff --git a/drivers/gpu/drm/drm_edid_load.c b/drivers/gpu/drm/drm_edid_load.c index d38b3b255926..37d8ba3ddb46 100644 --- a/drivers/gpu/drm/drm_edid_load.c +++ b/drivers/gpu/drm/drm_edid_load.c @@ -175,7 +175,7 @@ static void *edid_load(struct drm_connector *connector, const char *name, u8 *edid; int fwsize, builtin; int i, valid_extensions = 0; - bool print_bad_edid = !connector->bad_edid_counter || (drm_debug & DRM_UT_KMS); + bool print_bad_edid = !connector->bad_edid_counter || drm_debug_enabled(DRM_UT_KMS); builtin = match_string(generic_edid_name, GENERIC_EDIDS, name); if (builtin >= 0) { diff --git a/drivers/gpu/drm/drm_encoder.c b/drivers/gpu/drm/drm_encoder.c index 7fb47b7b8b44..80d88a55302e 100644 --- a/drivers/gpu/drm/drm_encoder.c +++ b/drivers/gpu/drm/drm_encoder.c @@ -22,6 +22,7 @@ #include <linux/export.h> +#include <drm/drm_bridge.h> #include <drm/drm_device.h> #include <drm/drm_drv.h> #include <drm/drm_encoder.h> diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index a7ba5b4902d6..8ebeccdeed23 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -46,6 +46,7 @@ #include <drm/drm_print.h> #include <drm/drm_vblank.h> +#include "drm_crtc_helper_internal.h" #include "drm_internal.h" static bool drm_fbdev_emulation = true; @@ -91,9 +92,12 @@ static DEFINE_MUTEX(kernel_fb_helper_lock); * * Drivers that support a dumb buffer with a virtual address and mmap support, * should try out the generic fbdev emulation using drm_fbdev_generic_setup(). + * It will automatically set up deferred I/O if the driver requires a shadow + * buffer. * - * Setup fbdev emulation by calling drm_fb_helper_fbdev_setup() and tear it - * down by calling drm_fb_helper_fbdev_teardown(). + * For other drivers, setup fbdev emulation by calling + * drm_fb_helper_fbdev_setup() and tear it down by calling + * drm_fb_helper_fbdev_teardown(). * * At runtime drivers should restore the fbdev console by using * drm_fb_helper_lastclose() as their &drm_driver.lastclose callback. @@ -126,8 +130,10 @@ static DEFINE_MUTEX(kernel_fb_helper_lock); * always run in process context since the fb_*() function could be running in * atomic context. If drm_fb_helper_deferred_io() is used as the deferred_io * callback it will also schedule dirty_work with the damage collected from the - * mmap page writes. Drivers can use drm_fb_helper_defio_init() to setup - * deferred I/O (coupled with drm_fb_helper_fbdev_teardown()). + * mmap page writes. + * + * Deferred I/O is not compatible with SHMEM. 
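Putting the drm_edid.c infoframe changes above together: a driver building an AVI infoframe can let drm_hdmi_avi_infoframe_from_display_mode() pick the VIC (via the new drm_mode_cea_vic()/drm_mode_hdmi_vic() split) and then copy the TV margin properties with the new drm_hdmi_avi_infoframe_bars() helper. A rough, hypothetical encoder-side sequence; the pack and write-out step is driver specific:

static void my_set_avi_infoframe(struct drm_encoder *encoder,
				 const struct drm_connector_state *conn_state,
				 const struct drm_display_mode *mode)
{
	struct drm_connector *connector = conn_state->connector;
	struct hdmi_avi_infoframe frame;

	if (drm_hdmi_avi_infoframe_from_display_mode(&frame, connector, mode))
		return;

	/* propagate the left/right/top/bottom margins into the bar fields */
	drm_hdmi_avi_infoframe_bars(&frame, conn_state);

	/* ... hdmi_avi_infoframe_pack() and hardware write-out go here ... */
}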
Such drivers should request an + * fbdev shadow buffer and call drm_fbdev_generic_setup() instead. */ static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc) @@ -679,49 +685,6 @@ void drm_fb_helper_deferred_io(struct fb_info *info, EXPORT_SYMBOL(drm_fb_helper_deferred_io); /** - * drm_fb_helper_defio_init - fbdev deferred I/O initialization - * @fb_helper: driver-allocated fbdev helper - * - * This function allocates &fb_deferred_io, sets callback to - * drm_fb_helper_deferred_io(), delay to 50ms and calls fb_deferred_io_init(). - * It should be called from the &drm_fb_helper_funcs->fb_probe callback. - * drm_fb_helper_fbdev_teardown() cleans up deferred I/O. - * - * NOTE: A copy of &fb_ops is made and assigned to &info->fbops. This is done - * because fb_deferred_io_cleanup() clears &fbops->fb_mmap and would thereby - * affect other instances of that &fb_ops. - * - * Returns: - * 0 on success or a negative error code on failure. - */ -int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper) -{ - struct fb_info *info = fb_helper->fbdev; - struct fb_deferred_io *fbdefio; - struct fb_ops *fbops; - - fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL); - fbops = kzalloc(sizeof(*fbops), GFP_KERNEL); - if (!fbdefio || !fbops) { - kfree(fbdefio); - kfree(fbops); - return -ENOMEM; - } - - info->fbdefio = fbdefio; - fbdefio->delay = msecs_to_jiffies(50); - fbdefio->deferred_io = drm_fb_helper_deferred_io; - - *fbops = *info->fbops; - info->fbops = fbops; - - fb_deferred_io_init(info); - - return 0; -} -EXPORT_SYMBOL(drm_fb_helper_defio_init); - -/** * drm_fb_helper_sys_read - wrapper around fb_sys_read * @info: fb_info struct pointer * @buf: userspace buffer to read from framebuffer memory @@ -2355,7 +2318,10 @@ static const struct drm_client_funcs drm_fbdev_client_funcs = { * * Drivers that set the dirty callback on their framebuffer will get a shadow * fbdev buffer that is blitted onto the real buffer. This is done in order to - * make deferred I/O work with all kinds of buffers. + * make deferred I/O work with all kinds of buffers. A shadow buffer can be + * requested explicitly by setting struct drm_mode_config.prefer_shadow or + * struct drm_mode_config.prefer_shadow_fbdev to true beforehand. This is + * required to use generic fbdev emulation with SHMEM helpers. * * This function is safe to call even when there are no connectors present. * Setup will be retried on the next hotplug event. diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 6854f5867d51..000fa4a1899f 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1099,23 +1099,12 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, struct vm_area_struct *vma) { struct drm_device *dev = obj->dev; + int ret; /* Check for valid size. */ if (obj_size < vma->vm_end - vma->vm_start) return -EINVAL; - if (obj->funcs && obj->funcs->vm_ops) - vma->vm_ops = obj->funcs->vm_ops; - else if (dev->driver->gem_vm_ops) - vma->vm_ops = dev->driver->gem_vm_ops; - else - return -EINVAL; - - vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; - vma->vm_private_data = obj; - vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); - vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); - /* Take a ref for this mapping of the object, so that the fault * handler can dereference the mmap offset's pointer to the object. 
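Following the fbdev-helper guidance above, a SHMEM-backed driver that needs deferred I/O would ask for a shadow buffer and then rely on the generic emulation rather than the removed drm_fb_helper_defio_init(). A rough init-time sketch; the chosen 32 bpp is an assumption:

	/* ask the generic fbdev emulation for a shadow buffer */
	dev->mode_config.prefer_shadow_fbdev = true;

	drm_fbdev_generic_setup(dev, 32);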
* This reference is cleaned up by the corresponding vm_close @@ -1124,6 +1113,33 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size, */ drm_gem_object_get(obj); + if (obj->funcs && obj->funcs->mmap) { + /* Remove the fake offset */ + vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); + + ret = obj->funcs->mmap(obj, vma); + if (ret) { + drm_gem_object_put_unlocked(obj); + return ret; + } + WARN_ON(!(vma->vm_flags & VM_DONTEXPAND)); + } else { + if (obj->funcs && obj->funcs->vm_ops) + vma->vm_ops = obj->funcs->vm_ops; + else if (dev->driver->gem_vm_ops) + vma->vm_ops = dev->driver->gem_vm_ops; + else { + drm_gem_object_put_unlocked(obj); + return -EINVAL; + } + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + } + + vma->vm_private_data = obj; + return 0; } EXPORT_SYMBOL(drm_gem_mmap_obj); diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index f5918707672f..0810d3ef6961 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -32,7 +32,7 @@ static const struct drm_gem_object_funcs drm_gem_shmem_funcs = { .get_sg_table = drm_gem_shmem_get_sg_table, .vmap = drm_gem_shmem_vmap, .vunmap = drm_gem_shmem_vunmap, - .vm_ops = &drm_gem_shmem_vm_ops, + .mmap = drm_gem_shmem_mmap, }; /** @@ -505,39 +505,30 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) drm_gem_vm_close(vma); } -const struct vm_operations_struct drm_gem_shmem_vm_ops = { +static const struct vm_operations_struct drm_gem_shmem_vm_ops = { .fault = drm_gem_shmem_fault, .open = drm_gem_shmem_vm_open, .close = drm_gem_shmem_vm_close, }; -EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops); /** * drm_gem_shmem_mmap - Memory-map a shmem GEM object - * @filp: File object + * @obj: gem object * @vma: VMA for the area to be mapped * * This function implements an augmented version of the GEM DRM file mmap * operation for shmem objects. Drivers which employ the shmem helpers should - * use this function as their &file_operations.mmap handler in the DRM device file's - * file_operations structure. - * - * Instead of directly referencing this function, drivers should use the - * DEFINE_DRM_GEM_SHMEM_FOPS() macro. + * use this function as their &drm_gem_object_funcs.mmap handler. * * Returns: * 0 on success or a negative error code on failure. 
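The shmem conversion here illustrates the new mmap flow: drm_gem_mmap_obj() (see the drm_gem.c hunk above) removes the fake offset and calls &drm_gem_object_funcs.mmap, so a driver simply names the handler in the per-object funcs table instead of wiring it into file_operations. A sketch of such a table reusing the shmem helpers; my_gem_funcs is a placeholder and the list is abbreviated:

static const struct drm_gem_object_funcs my_gem_funcs = {
	/* .free, .print_info and friends omitted for brevity */
	.get_sg_table	= drm_gem_shmem_get_sg_table,
	.vmap		= drm_gem_shmem_vmap,
	.vunmap		= drm_gem_shmem_vunmap,
	.mmap		= drm_gem_shmem_mmap,	/* replaces the old fops-based path */
};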
*/ -int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma) +int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) { struct drm_gem_shmem_object *shmem; int ret; - ret = drm_gem_mmap(filp, vma); - if (ret) - return ret; - - shmem = to_drm_gem_shmem_obj(vma->vm_private_data); + shmem = to_drm_gem_shmem_obj(obj); ret = drm_gem_shmem_get_pages(shmem); if (ret) { @@ -545,12 +536,10 @@ int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma) return ret; } - /* VM_PFNMAP was set by drm_gem_mmap() */ - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP; - - /* Remove the fake offset */ - vma->vm_pgoff -= drm_vma_node_start(&shmem->base.vma_node); + vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + vma->vm_ops = &drm_gem_shmem_vm_ops; return 0; } diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c new file mode 100644 index 000000000000..605a8a3da7f9 --- /dev/null +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0-or-later + +#include <linux/module.h> + +#include <drm/drm_gem_ttm_helper.h> + +/** + * DOC: overview + * + * This library provides helper functions for gem objects backed by + * ttm. + */ + +/** + * drm_gem_ttm_print_info() - Print &ttm_buffer_object info for debugfs + * @p: DRM printer + * @indent: Tab indentation level + * @gem: GEM object + * + * This function can be used as &drm_gem_object_funcs.print_info + * callback. + */ +void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent, + const struct drm_gem_object *gem) +{ + static const char * const plname[] = { + [ TTM_PL_SYSTEM ] = "system", + [ TTM_PL_TT ] = "tt", + [ TTM_PL_VRAM ] = "vram", + [ TTM_PL_PRIV ] = "priv", + + [ 16 ] = "cached", + [ 17 ] = "uncached", + [ 18 ] = "wc", + [ 19 ] = "contig", + + [ 21 ] = "pinned", /* NO_EVICT */ + [ 22 ] = "topdown", + }; + const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); + + drm_printf_indent(p, indent, "placement="); + drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname)); + drm_printf(p, "\n"); + + if (bo->mem.bus.is_iomem) { + drm_printf_indent(p, indent, "bus.base=%lx\n", + (unsigned long)bo->mem.bus.base); + drm_printf_indent(p, indent, "bus.offset=%lx\n", + (unsigned long)bo->mem.bus.offset); + } +} +EXPORT_SYMBOL(drm_gem_ttm_print_info); + +/** + * drm_gem_ttm_mmap() - mmap &ttm_buffer_object + * @gem: GEM object. + * @vma: vm area. + * + * This function can be used as &drm_gem_object_funcs.mmap + * callback. + */ +int drm_gem_ttm_mmap(struct drm_gem_object *gem, + struct vm_area_struct *vma) +{ + struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); + int ret; + + ret = ttm_bo_mmap_obj(vma, bo); + if (ret < 0) + return ret; + + /* + * ttm has its own object refcounting, so drop gem reference + * to avoid double accounting counting. 
+ */ + drm_gem_object_put_unlocked(gem); + + return 0; +} +EXPORT_SYMBOL(drm_gem_ttm_mmap); + +MODULE_DESCRIPTION("DRM gem ttm helpers"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index fd751078bae1..666cb4c22bb9 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -1,10 +1,15 @@ // SPDX-License-Identifier: GPL-2.0-or-later -#include <drm/drm_gem_vram_helper.h> +#include <drm/drm_debugfs.h> #include <drm/drm_device.h> +#include <drm/drm_file.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_ttm_helper.h> +#include <drm/drm_gem_vram_helper.h> #include <drm/drm_mode.h> +#include <drm/drm_plane.h> #include <drm/drm_prime.h> -#include <drm/drm_vram_mm_helper.h> +#include <drm/drm_simple_kms_helper.h> #include <drm/ttm/ttm_page_alloc.h> static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; @@ -14,6 +19,11 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs; * * This library provides a GEM buffer object that is backed by video RAM * (VRAM). It can be used for framebuffer devices with dedicated memory. + * + * The data structure &struct drm_vram_mm and its helpers implement a memory + * manager for simple framebuffer devices with dedicated video memory. Buffer + * objects are either placed in video RAM or evicted to system memory. The rsp. + * buffer object is provided by &struct drm_gem_vram_object. */ /* @@ -26,6 +36,10 @@ static void drm_gem_vram_cleanup(struct drm_gem_vram_object *gbo) * TTM buffer object in 'bo' has already been cleaned * up; only release the GEM object. */ + + WARN_ON(gbo->kmap_use_count); + WARN_ON(gbo->kmap.virtual); + drm_gem_object_release(&gbo->bo.base); } @@ -47,6 +61,7 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo, { unsigned int i; unsigned int c = 0; + u32 invariant_flags = pl_flag & TTM_PL_FLAG_TOPDOWN; gbo->placement.placement = gbo->placements; gbo->placement.busy_placement = gbo->placements; @@ -54,15 +69,18 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo, if (pl_flag & TTM_PL_FLAG_VRAM) gbo->placements[c++].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_VRAM; + TTM_PL_FLAG_VRAM | + invariant_flags; if (pl_flag & TTM_PL_FLAG_SYSTEM) gbo->placements[c++].flags = TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM; + TTM_PL_FLAG_SYSTEM | + invariant_flags; if (!c) gbo->placements[c++].flags = TTM_PL_MASK_CACHING | - TTM_PL_FLAG_SYSTEM; + TTM_PL_FLAG_SYSTEM | + invariant_flags; gbo->placement.num_placement = c; gbo->placement.num_busy_placement = c; @@ -82,8 +100,7 @@ static int drm_gem_vram_init(struct drm_device *dev, int ret; size_t acc_size; - if (!gbo->bo.base.funcs) - gbo->bo.base.funcs = &drm_gem_vram_object_funcs; + gbo->bo.base.funcs = &drm_gem_vram_object_funcs; ret = drm_gem_object_init(dev, &gbo->bo.base, size); if (ret) @@ -192,30 +209,12 @@ s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo) } EXPORT_SYMBOL(drm_gem_vram_offset); -/** - * drm_gem_vram_pin() - Pins a GEM VRAM object in a region. - * @gbo: the GEM VRAM object - * @pl_flag: a bitmask of possible memory regions - * - * Pinning a buffer object ensures that it is not evicted from - * a memory region. A pinned buffer object has to be unpinned before - * it can be pinned to another region. If the pl_flag argument is 0, - * the buffer is pinned at its current location (video RAM or system - * memory). - * - * Returns: - * 0 on success, or - * a negative error code otherwise. 
- */ -int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag) +static int drm_gem_vram_pin_locked(struct drm_gem_vram_object *gbo, + unsigned long pl_flag) { int i, ret; struct ttm_operation_ctx ctx = { false, false }; - ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); - if (ret < 0) - return ret; - if (gbo->pin_count) goto out; @@ -227,62 +226,123 @@ int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag) ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx); if (ret < 0) - goto err_ttm_bo_unreserve; + return ret; out: ++gbo->pin_count; - ttm_bo_unreserve(&gbo->bo); return 0; - -err_ttm_bo_unreserve: - ttm_bo_unreserve(&gbo->bo); - return ret; } -EXPORT_SYMBOL(drm_gem_vram_pin); /** - * drm_gem_vram_unpin() - Unpins a GEM VRAM object + * drm_gem_vram_pin() - Pins a GEM VRAM object in a region. * @gbo: the GEM VRAM object + * @pl_flag: a bitmask of possible memory regions + * + * Pinning a buffer object ensures that it is not evicted from + * a memory region. A pinned buffer object has to be unpinned before + * it can be pinned to another region. If the pl_flag argument is 0, + * the buffer is pinned at its current location (video RAM or system + * memory). + * + * Small buffer objects, such as cursor images, can lead to memory + * fragmentation if they are pinned in the middle of video RAM. This + * is especially a problem on devices with only a small amount of + * video RAM. Fragmentation can prevent the primary framebuffer from + * fitting in, even though there's enough memory overall. The modifier + * DRM_GEM_VRAM_PL_FLAG_TOPDOWN marks the buffer object to be pinned + * at the high end of the memory region to avoid fragmentation. * * Returns: * 0 on success, or * a negative error code otherwise. */ -int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo) +int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag) { - int i, ret; - struct ttm_operation_ctx ctx = { false, false }; + int ret; ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); - if (ret < 0) + if (ret) return ret; + ret = drm_gem_vram_pin_locked(gbo, pl_flag); + ttm_bo_unreserve(&gbo->bo); + + return ret; +} +EXPORT_SYMBOL(drm_gem_vram_pin); + +static int drm_gem_vram_unpin_locked(struct drm_gem_vram_object *gbo) +{ + int i, ret; + struct ttm_operation_ctx ctx = { false, false }; if (WARN_ON_ONCE(!gbo->pin_count)) - goto out; + return 0; --gbo->pin_count; if (gbo->pin_count) - goto out; + return 0; for (i = 0; i < gbo->placement.num_placement ; ++i) gbo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; ret = ttm_bo_validate(&gbo->bo, &gbo->placement, &ctx); if (ret < 0) - goto err_ttm_bo_unreserve; - -out: - ttm_bo_unreserve(&gbo->bo); + return ret; return 0; +} -err_ttm_bo_unreserve: +/** + * drm_gem_vram_unpin() - Unpins a GEM VRAM object + * @gbo: the GEM VRAM object + * + * Returns: + * 0 on success, or + * a negative error code otherwise. 
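A short illustration of the fragmentation note above, for a driver pinning a small cursor BO; my_pin_cursor is hypothetical:

static int my_pin_cursor(struct drm_gem_vram_object *gbo)
{
	/* keep small BOs at the top of VRAM so the primary fb still fits */
	return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM |
				     DRM_GEM_VRAM_PL_FLAG_TOPDOWN);
}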
+ */ +int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo) +{ + int ret; + + ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); + if (ret) + return ret; + ret = drm_gem_vram_unpin_locked(gbo); ttm_bo_unreserve(&gbo->bo); + return ret; } EXPORT_SYMBOL(drm_gem_vram_unpin); +static void *drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, + bool map, bool *is_iomem) +{ + int ret; + struct ttm_bo_kmap_obj *kmap = &gbo->kmap; + + if (gbo->kmap_use_count > 0) + goto out; + + if (kmap->virtual || !map) + goto out; + + ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap); + if (ret) + return ERR_PTR(ret); + +out: + if (!kmap->virtual) { + if (is_iomem) + *is_iomem = false; + return NULL; /* not mapped; don't increment ref */ + } + ++gbo->kmap_use_count; + if (is_iomem) + return ttm_kmap_obj_virtual(kmap, is_iomem); + return kmap->virtual; +} + /** * drm_gem_vram_kmap() - Maps a GEM VRAM object into kernel address space * @gbo: the GEM VRAM object @@ -304,43 +364,121 @@ void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map, bool *is_iomem) { int ret; - struct ttm_bo_kmap_obj *kmap = &gbo->kmap; - - if (kmap->virtual || !map) - goto out; + void *virtual; - ret = ttm_bo_kmap(&gbo->bo, 0, gbo->bo.num_pages, kmap); + ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); if (ret) return ERR_PTR(ret); + virtual = drm_gem_vram_kmap_locked(gbo, map, is_iomem); + ttm_bo_unreserve(&gbo->bo); -out: - if (!is_iomem) - return kmap->virtual; - if (!kmap->virtual) { - *is_iomem = false; - return NULL; - } - return ttm_kmap_obj_virtual(kmap, is_iomem); + return virtual; } EXPORT_SYMBOL(drm_gem_vram_kmap); +static void drm_gem_vram_kunmap_locked(struct drm_gem_vram_object *gbo) +{ + if (WARN_ON_ONCE(!gbo->kmap_use_count)) + return; + if (--gbo->kmap_use_count > 0) + return; + + /* + * Permanently mapping and unmapping buffers adds overhead from + * updating the page tables and creates debugging output. Therefore, + * we delay the actual unmap operation until the BO gets evicted + * from memory. See drm_gem_vram_bo_driver_move_notify(). + */ +} + /** * drm_gem_vram_kunmap() - Unmaps a GEM VRAM object * @gbo: the GEM VRAM object */ void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo) { - struct ttm_bo_kmap_obj *kmap = &gbo->kmap; + int ret; - if (!kmap->virtual) + ret = ttm_bo_reserve(&gbo->bo, false, false, NULL); + if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret)) return; - - ttm_bo_kunmap(kmap); - kmap->virtual = NULL; + drm_gem_vram_kunmap_locked(gbo); + ttm_bo_unreserve(&gbo->bo); } EXPORT_SYMBOL(drm_gem_vram_kunmap); /** + * drm_gem_vram_vmap() - Pins and maps a GEM VRAM object into kernel address + * space + * @gbo: The GEM VRAM object to map + * + * The vmap function pins a GEM VRAM object to its current location, either + * system or video memory, and maps its buffer into kernel address space. + * As pinned object cannot be relocated, you should avoid pinning objects + * permanently. Call drm_gem_vram_vunmap() with the returned address to + * unmap and unpin the GEM VRAM object. + * + * If you have special requirements for the pinning or mapping operations, + * call drm_gem_vram_pin() and drm_gem_vram_kmap() directly. + * + * Returns: + * The buffer's virtual address on success, or + * an ERR_PTR()-encoded error code otherwise. 
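The vmap/vunmap pair documented here is meant to replace open-coded pin + kmap / kunmap + unpin sequences (the PRIME helpers later in this file are converted exactly that way). A minimal, hypothetical user:

static int my_upload_to_bo(struct drm_gem_vram_object *gbo,
			   const void *src, size_t size)
{
	void *vaddr;

	vaddr = drm_gem_vram_vmap(gbo);		/* pins and maps the BO */
	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memcpy(vaddr, src, size);

	drm_gem_vram_vunmap(gbo, vaddr);	/* unmaps and unpins again */
	return 0;
}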
+ */ +void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo) +{ + int ret; + void *base; + + ret = ttm_bo_reserve(&gbo->bo, true, false, NULL); + if (ret) + return ERR_PTR(ret); + + ret = drm_gem_vram_pin_locked(gbo, 0); + if (ret) + goto err_ttm_bo_unreserve; + base = drm_gem_vram_kmap_locked(gbo, true, NULL); + if (IS_ERR(base)) { + ret = PTR_ERR(base); + goto err_drm_gem_vram_unpin_locked; + } + + ttm_bo_unreserve(&gbo->bo); + + return base; + +err_drm_gem_vram_unpin_locked: + drm_gem_vram_unpin_locked(gbo); +err_ttm_bo_unreserve: + ttm_bo_unreserve(&gbo->bo); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(drm_gem_vram_vmap); + +/** + * drm_gem_vram_vunmap() - Unmaps and unpins a GEM VRAM object + * @gbo: The GEM VRAM object to unmap + * @vaddr: The mapping's base address as returned by drm_gem_vram_vmap() + * + * A call to drm_gem_vram_vunmap() unmaps and unpins a GEM VRAM buffer. See + * the documentation for drm_gem_vram_vmap() for more information. + */ +void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr) +{ + int ret; + + ret = ttm_bo_reserve(&gbo->bo, false, false, NULL); + if (WARN_ONCE(ret, "ttm_bo_reserve_failed(): ret=%d\n", ret)) + return; + + drm_gem_vram_kunmap_locked(gbo); + drm_gem_vram_unpin_locked(gbo); + + ttm_bo_unreserve(&gbo->bo); +} +EXPORT_SYMBOL(drm_gem_vram_vunmap); + +/** * drm_gem_vram_fill_create_dumb() - \ Helper for implementing &struct drm_driver.dumb_create * @file: the DRM file @@ -410,59 +548,27 @@ static bool drm_is_gem_vram(struct ttm_buffer_object *bo) return (bo->destroy == ttm_buffer_object_destroy); } -/** - * drm_gem_vram_bo_driver_evict_flags() - \ - Implements &struct ttm_bo_driver.evict_flags - * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo - * @pl: TTM placement information. - */ -void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo, - struct ttm_placement *pl) +static void drm_gem_vram_bo_driver_evict_flags(struct drm_gem_vram_object *gbo, + struct ttm_placement *pl) { - struct drm_gem_vram_object *gbo; - - /* TTM may pass BOs that are not GEM VRAM BOs. */ - if (!drm_is_gem_vram(bo)) - return; - - gbo = drm_gem_vram_of_bo(bo); drm_gem_vram_placement(gbo, TTM_PL_FLAG_SYSTEM); *pl = gbo->placement; } -EXPORT_SYMBOL(drm_gem_vram_bo_driver_evict_flags); -/** - * drm_gem_vram_bo_driver_verify_access() - \ - Implements &struct ttm_bo_driver.verify_access - * @bo: TTM buffer object. Refers to &struct drm_gem_vram_object.bo - * @filp: File pointer. - * - * Returns: - * 0 on success, or - * a negative errno code otherwise. - */ -int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo, - struct file *filp) +static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo, + bool evict, + struct ttm_mem_reg *new_mem) { - struct drm_gem_vram_object *gbo = drm_gem_vram_of_bo(bo); + struct ttm_bo_kmap_obj *kmap = &gbo->kmap; - return drm_vma_node_verify_access(&gbo->bo.base.vma_node, - filp->private_data); -} -EXPORT_SYMBOL(drm_gem_vram_bo_driver_verify_access); + if (WARN_ON_ONCE(gbo->kmap_use_count)) + return; -/* - * drm_gem_vram_mm_funcs - Functions for &struct drm_vram_mm - * - * Most users of @struct drm_gem_vram_object will also use - * @struct drm_vram_mm. This instance of &struct drm_vram_mm_funcs - * can be used to connect both. 
- */ -const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs = { - .evict_flags = drm_gem_vram_bo_driver_evict_flags, - .verify_access = drm_gem_vram_bo_driver_verify_access -}; -EXPORT_SYMBOL(drm_gem_vram_mm_funcs); + if (!kmap->virtual) + return; + ttm_bo_kunmap(kmap); + kmap->virtual = NULL; +} /* * Helpers for struct drm_gem_object_funcs @@ -544,6 +650,129 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file, EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset); /* + * Helpers for struct drm_plane_helper_funcs + */ + +/** + * drm_gem_vram_plane_helper_prepare_fb() - \ + * Implements &struct drm_plane_helper_funcs.prepare_fb + * @plane: a DRM plane + * @new_state: the plane's new state + * + * During plane updates, this function pins the GEM VRAM + * objects of the plane's new framebuffer to VRAM. Call + * drm_gem_vram_plane_helper_cleanup_fb() to unpin them. + * + * Returns: + * 0 on success, or + * a negative errno code otherwise. + */ +int +drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + size_t i; + struct drm_gem_vram_object *gbo; + int ret; + + if (!new_state->fb) + return 0; + + for (i = 0; i < ARRAY_SIZE(new_state->fb->obj); ++i) { + if (!new_state->fb->obj[i]) + continue; + gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]); + ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); + if (ret) + goto err_drm_gem_vram_unpin; + } + + return 0; + +err_drm_gem_vram_unpin: + while (i) { + --i; + gbo = drm_gem_vram_of_gem(new_state->fb->obj[i]); + drm_gem_vram_unpin(gbo); + } + return ret; +} +EXPORT_SYMBOL(drm_gem_vram_plane_helper_prepare_fb); + +/** + * drm_gem_vram_plane_helper_cleanup_fb() - \ + * Implements &struct drm_plane_helper_funcs.cleanup_fb + * @plane: a DRM plane + * @old_state: the plane's old state + * + * During plane updates, this function unpins the GEM VRAM + * objects of the plane's old framebuffer from VRAM. Complements + * drm_gem_vram_plane_helper_prepare_fb(). + */ +void +drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + size_t i; + struct drm_gem_vram_object *gbo; + + if (!old_state->fb) + return; + + for (i = 0; i < ARRAY_SIZE(old_state->fb->obj); ++i) { + if (!old_state->fb->obj[i]) + continue; + gbo = drm_gem_vram_of_gem(old_state->fb->obj[i]); + drm_gem_vram_unpin(gbo); + } +} +EXPORT_SYMBOL(drm_gem_vram_plane_helper_cleanup_fb); + +/* + * Helpers for struct drm_simple_display_pipe_funcs + */ + +/** + * drm_gem_vram_simple_display_pipe_prepare_fb() - \ + * Implements &struct drm_simple_display_pipe_funcs.prepare_fb + * @pipe: a simple display pipe + * @new_state: the plane's new state + * + * During plane updates, this function pins the GEM VRAM + * objects of the plane's new framebuffer to VRAM. Call + * drm_gem_vram_simple_display_pipe_cleanup_fb() to unpin them. + * + * Returns: + * 0 on success, or + * a negative errno code otherwise. + */ +int drm_gem_vram_simple_display_pipe_prepare_fb( + struct drm_simple_display_pipe *pipe, + struct drm_plane_state *new_state) +{ + return drm_gem_vram_plane_helper_prepare_fb(&pipe->plane, new_state); +} +EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_prepare_fb); + +/** + * drm_gem_vram_simple_display_pipe_cleanup_fb() - \ + * Implements &struct drm_simple_display_pipe_funcs.cleanup_fb + * @pipe: a simple display pipe + * @old_state: the plane's old state + * + * During plane updates, this function unpins the GEM VRAM + * objects of the plane's old framebuffer from VRAM. 
Complements + * drm_gem_vram_simple_display_pipe_prepare_fb(). + */ +void drm_gem_vram_simple_display_pipe_cleanup_fb( + struct drm_simple_display_pipe *pipe, + struct drm_plane_state *old_state) +{ + drm_gem_vram_plane_helper_cleanup_fb(&pipe->plane, old_state); +} +EXPORT_SYMBOL(drm_gem_vram_simple_display_pipe_cleanup_fb); + +/* * PRIME helpers */ @@ -595,17 +824,11 @@ static void drm_gem_vram_object_unpin(struct drm_gem_object *gem) static void *drm_gem_vram_object_vmap(struct drm_gem_object *gem) { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); - int ret; void *base; - ret = drm_gem_vram_pin(gbo, 0); - if (ret) - return NULL; - base = drm_gem_vram_kmap(gbo, true, NULL); - if (IS_ERR(base)) { - drm_gem_vram_unpin(gbo); + base = drm_gem_vram_vmap(gbo); + if (IS_ERR(base)) return NULL; - } return base; } @@ -620,8 +843,7 @@ static void drm_gem_vram_object_vunmap(struct drm_gem_object *gem, { struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(gem); - drm_gem_vram_kunmap(gbo); - drm_gem_vram_unpin(gbo); + drm_gem_vram_vunmap(gbo, vaddr); } /* @@ -633,5 +855,278 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = { .pin = drm_gem_vram_object_pin, .unpin = drm_gem_vram_object_unpin, .vmap = drm_gem_vram_object_vmap, - .vunmap = drm_gem_vram_object_vunmap + .vunmap = drm_gem_vram_object_vunmap, + .mmap = drm_gem_ttm_mmap, + .print_info = drm_gem_ttm_print_info, +}; + +/* + * VRAM memory manager + */ + +/* + * TTM TT + */ + +static void backend_func_destroy(struct ttm_tt *tt) +{ + ttm_tt_fini(tt); + kfree(tt); +} + +static struct ttm_backend_func backend_func = { + .destroy = backend_func_destroy +}; + +/* + * TTM BO device + */ + +static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo, + uint32_t page_flags) +{ + struct ttm_tt *tt; + int ret; + + tt = kzalloc(sizeof(*tt), GFP_KERNEL); + if (!tt) + return NULL; + + tt->func = &backend_func; + + ret = ttm_tt_init(tt, bo, page_flags); + if (ret < 0) + goto err_ttm_tt_init; + + return tt; + +err_ttm_tt_init: + kfree(tt); + return NULL; +} + +static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, + struct ttm_mem_type_manager *man) +{ + switch (type) { + case TTM_PL_SYSTEM: + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + case TTM_PL_VRAM: + man->func = &ttm_bo_manager_func; + man->flags = TTM_MEMTYPE_FLAG_FIXED | + TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + default: + return -EINVAL; + } + return 0; +} + +static void bo_driver_evict_flags(struct ttm_buffer_object *bo, + struct ttm_placement *placement) +{ + struct drm_gem_vram_object *gbo; + + /* TTM may pass BOs that are not GEM VRAM BOs. */ + if (!drm_is_gem_vram(bo)) + return; + + gbo = drm_gem_vram_of_bo(bo); + + drm_gem_vram_bo_driver_evict_flags(gbo, placement); +} + +static void bo_driver_move_notify(struct ttm_buffer_object *bo, + bool evict, + struct ttm_mem_reg *new_mem) +{ + struct drm_gem_vram_object *gbo; + + /* TTM may pass BOs that are not GEM VRAM BOs. 
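Drivers built on the simple display pipe can use the corresponding wrappers the same way. A sketch with hypothetical enable/disable/update callbacks:

	static const struct drm_simple_display_pipe_funcs my_pipe_funcs = {
		.enable		= my_pipe_enable,	/* driver-specific */
		.disable	= my_pipe_disable,	/* driver-specific */
		.update		= my_pipe_update,	/* driver-specific */
		.prepare_fb	= drm_gem_vram_simple_display_pipe_prepare_fb,
		.cleanup_fb	= drm_gem_vram_simple_display_pipe_cleanup_fb,
	};
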
*/ + if (!drm_is_gem_vram(bo)) + return; + + gbo = drm_gem_vram_of_bo(bo); + + drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem); +} + +static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = bdev->man + mem->mem_type; + struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev); + + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) + return -EINVAL; + + mem->bus.addr = NULL; + mem->bus.size = mem->num_pages << PAGE_SHIFT; + + switch (mem->mem_type) { + case TTM_PL_SYSTEM: /* nothing to do */ + mem->bus.offset = 0; + mem->bus.base = 0; + mem->bus.is_iomem = false; + break; + case TTM_PL_VRAM: + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.base = vmm->vram_base; + mem->bus.is_iomem = true; + break; + default: + return -EINVAL; + } + + return 0; +} + +static void bo_driver_io_mem_free(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ } + +static struct ttm_bo_driver bo_driver = { + .ttm_tt_create = bo_driver_ttm_tt_create, + .ttm_tt_populate = ttm_pool_populate, + .ttm_tt_unpopulate = ttm_pool_unpopulate, + .init_mem_type = bo_driver_init_mem_type, + .eviction_valuable = ttm_bo_eviction_valuable, + .evict_flags = bo_driver_evict_flags, + .move_notify = bo_driver_move_notify, + .io_mem_reserve = bo_driver_io_mem_reserve, + .io_mem_free = bo_driver_io_mem_free, +}; + +/* + * struct drm_vram_mm + */ + +#if defined(CONFIG_DEBUG_FS) +static int drm_vram_mm_debugfs(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct drm_vram_mm *vmm = node->minor->dev->vram_mm; + struct drm_mm *mm = vmm->bdev.man[TTM_PL_VRAM].priv; + struct drm_printer p = drm_seq_file_printer(m); + + spin_lock(&ttm_bo_glob.lru_lock); + drm_mm_print(mm, &p); + spin_unlock(&ttm_bo_glob.lru_lock); + return 0; +} + +static const struct drm_info_list drm_vram_mm_debugfs_list[] = { + { "vram-mm", drm_vram_mm_debugfs, 0, NULL }, }; +#endif + +/** + * drm_vram_mm_debugfs_init() - Register VRAM MM debugfs file. + * + * @minor: drm minor device. + * + * Returns: + * 0 on success, or + * a negative error code otherwise. + */ +int drm_vram_mm_debugfs_init(struct drm_minor *minor) +{ + int ret = 0; + +#if defined(CONFIG_DEBUG_FS) + ret = drm_debugfs_create_files(drm_vram_mm_debugfs_list, + ARRAY_SIZE(drm_vram_mm_debugfs_list), + minor->debugfs_root, minor); +#endif + return ret; +} +EXPORT_SYMBOL(drm_vram_mm_debugfs_init); + +static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, + uint64_t vram_base, size_t vram_size) +{ + int ret; + + vmm->vram_base = vram_base; + vmm->vram_size = vram_size; + + ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, + dev->anon_inode->i_mapping, + dev->vma_offset_manager, + true); + if (ret) + return ret; + + ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT); + if (ret) + return ret; + + return 0; +} + +static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm) +{ + ttm_bo_device_release(&vmm->bdev); +} + +/* + * Helpers for integration with struct drm_device + */ + +/** + * drm_vram_helper_alloc_mm - Allocates a device's instance of \ + &struct drm_vram_mm + * @dev: the DRM device + * @vram_base: the base address of the video memory + * @vram_size: the size of the video memory in bytes + * + * Returns: + * The new instance of &struct drm_vram_mm on success, or + * an ERR_PTR()-encoded errno code otherwise. 
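With the funcs argument gone, setting up and tearing down the VRAM manager reduces to the two calls below. This is a sketch of probe/remove wiring for a hypothetical driver that already knows its VRAM aperture; note that the allocator returns an ERR_PTR()-encoded pointer rather than an errno.

	static int my_vram_init(struct drm_device *dev, u64 vram_base,
				size_t vram_size)
	{
		struct drm_vram_mm *vmm;

		vmm = drm_vram_helper_alloc_mm(dev, vram_base, vram_size);
		if (IS_ERR(vmm))
			return PTR_ERR(vmm);
		return 0;
	}

	static void my_vram_fini(struct drm_device *dev)
	{
		/* Safe to call even if allocation never happened. */
		drm_vram_helper_release_mm(dev);
	}
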
+ */ +struct drm_vram_mm *drm_vram_helper_alloc_mm( + struct drm_device *dev, uint64_t vram_base, size_t vram_size) +{ + int ret; + + if (WARN_ON(dev->vram_mm)) + return dev->vram_mm; + + dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL); + if (!dev->vram_mm) + return ERR_PTR(-ENOMEM); + + ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size); + if (ret) + goto err_kfree; + + return dev->vram_mm; + +err_kfree: + kfree(dev->vram_mm); + dev->vram_mm = NULL; + return ERR_PTR(ret); +} +EXPORT_SYMBOL(drm_vram_helper_alloc_mm); + +/** + * drm_vram_helper_release_mm - Releases a device's instance of \ + &struct drm_vram_mm + * @dev: the DRM device + */ +void drm_vram_helper_release_mm(struct drm_device *dev) +{ + if (!dev->vram_mm) + return; + + drm_vram_mm_cleanup(dev->vram_mm); + kfree(dev->vram_mm); + dev->vram_mm = NULL; +} +EXPORT_SYMBOL(drm_vram_helper_release_mm); diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 0bec6dbb0142..fbea69d6f909 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -40,6 +40,7 @@ #include <xen/xen.h> #include <drm/drm_agpsupport.h> +#include <drm/drm_cache.h> #include <drm/drm_device.h> #include "drm_legacy.h" diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c index 1961f713aaab..e34058c721be 100644 --- a/drivers/gpu/drm/drm_mipi_dbi.c +++ b/drivers/gpu/drm/drm_mipi_dbi.c @@ -783,7 +783,7 @@ static int mipi_dbi_spi1e_transfer(struct mipi_dbi *dbi, int dc, int i, ret; u8 *dst; - if (drm_debug & DRM_UT_DRIVER) + if (drm_debug_enabled(DRM_UT_DRIVER)) pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n", __func__, dc, max_chunk); @@ -907,7 +907,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc, max_chunk = dbi->tx_buf9_len; dst16 = dbi->tx_buf9; - if (drm_debug & DRM_UT_DRIVER) + if (drm_debug_enabled(DRM_UT_DRIVER)) pr_debug("[drm:%s] dc=%d, max_chunk=%zu, transfers:\n", __func__, dc, max_chunk); @@ -955,7 +955,7 @@ static int mipi_dbi_typec1_command(struct mipi_dbi *dbi, u8 *cmd, int ret; if (mipi_dbi_command_is_read(dbi, *cmd)) - return -ENOTSUPP; + return -EOPNOTSUPP; MIPI_DBI_DEBUG_COMMAND(*cmd, parameters, num); @@ -1021,7 +1021,7 @@ static int mipi_dbi_typec3_command_read(struct mipi_dbi *dbi, u8 *cmd, unsigned int i; for (i = 0; i < len; i++) - data[i] = (buf[i] << 1) | !!(buf[i + 1] & BIT(7)); + data[i] = (buf[i] << 1) | (buf[i + 1] >> 7); } MIPI_DBI_DEBUG_COMMAND(*cmd, data, len); @@ -1187,8 +1187,7 @@ static ssize_t mipi_dbi_debugfs_command_write(struct file *file, struct mipi_dbi_dev *dbidev = m->private; u8 val, cmd = 0, parameters[64]; char *buf, *pos, *token; - unsigned int i; - int ret, idx; + int i, ret, idx; if (!drm_dev_enter(&dbidev->drm, &idx)) return -ENODEV; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 4581c5387372..2a6e34663146 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -174,7 +174,7 @@ static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node, node->__subtree_last = LAST(node); - if (hole_node->allocated) { + if (drm_mm_node_allocated(hole_node)) { rb = &hole_node->rb; while (rb) { parent = rb_entry(rb, struct drm_mm_node, rb); @@ -424,9 +424,9 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node) node->mm = mm; + __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); list_add(&node->node_list, &hole->node_list); drm_mm_interval_tree_add_node(hole, node); - node->allocated = true; node->hole_size = 0; rm_hole(hole); @@ -543,9 +543,9 @@ int 
drm_mm_insert_node_in_range(struct drm_mm * const mm, node->color = color; node->hole_size = 0; + __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); list_add(&node->node_list, &hole->node_list); drm_mm_interval_tree_add_node(hole, node); - node->allocated = true; rm_hole(hole); if (adj_start > hole_start) @@ -561,6 +561,11 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm, } EXPORT_SYMBOL(drm_mm_insert_node_in_range); +static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node) +{ + return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); +} + /** * drm_mm_remove_node - Remove a memory node from the allocator. * @node: drm_mm_node to remove @@ -574,8 +579,8 @@ void drm_mm_remove_node(struct drm_mm_node *node) struct drm_mm *mm = node->mm; struct drm_mm_node *prev_node; - DRM_MM_BUG_ON(!node->allocated); - DRM_MM_BUG_ON(node->scanned_block); + DRM_MM_BUG_ON(!drm_mm_node_allocated(node)); + DRM_MM_BUG_ON(drm_mm_node_scanned_block(node)); prev_node = list_prev_entry(node, node_list); @@ -584,11 +589,12 @@ void drm_mm_remove_node(struct drm_mm_node *node) drm_mm_interval_tree_remove(node, &mm->interval_tree); list_del(&node->node_list); - node->allocated = false; if (drm_mm_hole_follows(prev_node)) rm_hole(prev_node); add_hole(prev_node); + + clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags); } EXPORT_SYMBOL(drm_mm_remove_node); @@ -605,10 +611,11 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) { struct drm_mm *mm = old->mm; - DRM_MM_BUG_ON(!old->allocated); + DRM_MM_BUG_ON(!drm_mm_node_allocated(old)); *new = *old; + __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags); list_replace(&old->node_list, &new->node_list); rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree); @@ -622,8 +629,7 @@ void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new) &mm->holes_addr); } - old->allocated = false; - new->allocated = true; + clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags); } EXPORT_SYMBOL(drm_mm_replace_node); @@ -731,9 +737,9 @@ bool drm_mm_scan_add_block(struct drm_mm_scan *scan, u64 adj_start, adj_end; DRM_MM_BUG_ON(node->mm != mm); - DRM_MM_BUG_ON(!node->allocated); - DRM_MM_BUG_ON(node->scanned_block); - node->scanned_block = true; + DRM_MM_BUG_ON(!drm_mm_node_allocated(node)); + DRM_MM_BUG_ON(drm_mm_node_scanned_block(node)); + __set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); mm->scan_active++; /* Remove this block from the node_list so that we enlarge the hole @@ -818,8 +824,8 @@ bool drm_mm_scan_remove_block(struct drm_mm_scan *scan, struct drm_mm_node *prev_node; DRM_MM_BUG_ON(node->mm != scan->mm); - DRM_MM_BUG_ON(!node->scanned_block); - node->scanned_block = false; + DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node)); + __clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags); DRM_MM_BUG_ON(!node->mm->scan_active); node->mm->scan_active--; @@ -917,7 +923,7 @@ void drm_mm_init(struct drm_mm *mm, u64 start, u64 size) /* Clever trick to avoid a special case in the free hole tracking. 
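The allocated/scanned booleans are replaced by flag bits here, and drivers are expected to go through the accessors instead of touching the fields. A short sketch of the usual teardown pattern; struct my_vma and its embedded node are hypothetical:

	struct my_vma {
		struct drm_mm_node node;
		/* ... */
	};

	static void my_vma_unbind(struct my_vma *vma)
	{
		/* drm_mm_node_allocated() now tests DRM_MM_NODE_ALLOCATED_BIT. */
		if (drm_mm_node_allocated(&vma->node))
			drm_mm_remove_node(&vma->node);
	}
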
*/ INIT_LIST_HEAD(&mm->head_node.node_list); - mm->head_node.allocated = false; + mm->head_node.flags = 0; mm->head_node.mm = mm; mm->head_node.start = start + size; mm->head_node.size = -size; diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 7bc03c3c154f..3b570a404933 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -428,8 +428,6 @@ EXPORT_SYMBOL(drm_mode_config_init); * Note that since this /should/ happen single-threaded at driver/device * teardown time, no locking is required. It's the driver's job to ensure that * this guarantee actually holds true. - * - * FIXME: cleanup any dangling user buffer objects too */ void drm_mode_config_cleanup(struct drm_device *dev) { diff --git a/drivers/gpu/drm/drm_of.c b/drivers/gpu/drm/drm_of.c index 43d89dd59c6b..0ca58803ba46 100644 --- a/drivers/gpu/drm/drm_of.c +++ b/drivers/gpu/drm/drm_of.c @@ -250,11 +250,6 @@ int drm_of_find_panel_or_bridge(const struct device_node *np, if (!remote) return -ENODEV; - if (!of_device_is_available(remote)) { - of_node_put(remote); - return -ENODEV; - } - if (panel) { *panel = of_drm_find_panel(remote); if (!IS_ERR(*panel)) diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c index 6b0bf42039cf..ed7985c0535a 100644 --- a/drivers/gpu/drm/drm_panel.c +++ b/drivers/gpu/drm/drm_panel.c @@ -44,13 +44,21 @@ static LIST_HEAD(panel_list); /** * drm_panel_init - initialize a panel * @panel: DRM panel + * @dev: parent device of the panel + * @funcs: panel operations + * @connector_type: the connector type (DRM_MODE_CONNECTOR_*) corresponding to + * the panel interface * - * Sets up internal fields of the panel so that it can subsequently be added - * to the registry. + * Initialize the panel structure for subsequent registration with + * drm_panel_add(). */ -void drm_panel_init(struct drm_panel *panel) +void drm_panel_init(struct drm_panel *panel, struct device *dev, + const struct drm_panel_funcs *funcs, int connector_type) { INIT_LIST_HEAD(&panel->list); + panel->dev = dev; + panel->funcs = funcs; + panel->connector_type = connector_type; } EXPORT_SYMBOL(drm_panel_init); diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 0a2316e0e812..0814211b0f3f 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -713,6 +713,15 @@ int drm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) struct file *fil; int ret; + if (obj->funcs && obj->funcs->mmap) { + ret = obj->funcs->mmap(obj, vma); + if (ret) + return ret; + vma->vm_private_data = obj; + drm_gem_object_get(obj); + return 0; + } + priv = kzalloc(sizeof(*priv), GFP_KERNEL); fil = kzalloc(sizeof(*fil), GFP_KERNEL); if (!priv || !fil) { diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c index a17c8a14dba4..9a25d73c155c 100644 --- a/drivers/gpu/drm/drm_print.c +++ b/drivers/gpu/drm/drm_print.c @@ -28,6 +28,7 @@ #include <stdarg.h> #include <linux/io.h> +#include <linux/moduleparam.h> #include <linux/seq_file.h> #include <linux/slab.h> @@ -35,6 +36,24 @@ #include <drm/drm_drv.h> #include <drm/drm_print.h> +/* + * drm_debug: Enable debug output. + * Bitmask of DRM_UT_x. See include/drm/drm_print.h for details. 
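With the drm_panel_init() change above, panel drivers pass the parent device, the panel functions and the connector type up front instead of assigning those fields by hand before drm_panel_add(). A hypothetical DSI panel probe might look like this; my_panel and my_panel_funcs are placeholders:

	struct my_panel {
		struct drm_panel panel;
		/* ... */
	};

	static int my_panel_probe(struct mipi_dsi_device *dsi)
	{
		struct my_panel *ctx;

		ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		drm_panel_init(&ctx->panel, &dsi->dev, &my_panel_funcs,
			       DRM_MODE_CONNECTOR_DSI);

		return drm_panel_add(&ctx->panel);
	}
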
+ */ +unsigned int drm_debug; +EXPORT_SYMBOL(drm_debug); + +MODULE_PARM_DESC(debug, "Enable debug output, where each bit enables a debug category.\n" +"\t\tBit 0 (0x01) will enable CORE messages (drm core code)\n" +"\t\tBit 1 (0x02) will enable DRIVER messages (drm controller code)\n" +"\t\tBit 2 (0x04) will enable KMS messages (modesetting code)\n" +"\t\tBit 3 (0x08) will enable PRIME messages (prime code)\n" +"\t\tBit 4 (0x10) will enable ATOMIC messages (atomic code)\n" +"\t\tBit 5 (0x20) will enable VBL messages (vblank code)\n" +"\t\tBit 7 (0x80) will enable LEASE messages (leasing code)\n" +"\t\tBit 8 (0x100) will enable DP messages (displayport code)"); +module_param_named(debug, drm_debug, int, 0600); + void __drm_puts_coredump(struct drm_printer *p, const char *str) { struct drm_print_iterator *iterator = p->arg; @@ -147,6 +166,12 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf) } EXPORT_SYMBOL(__drm_printfn_debug); +void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf) +{ + pr_err("*ERROR* %s %pV", p->prefix, vaf); +} +EXPORT_SYMBOL(__drm_printfn_err); + /** * drm_puts - print a const string to a &drm_printer stream * @p: the &drm printer @@ -179,6 +204,37 @@ void drm_printf(struct drm_printer *p, const char *f, ...) } EXPORT_SYMBOL(drm_printf); +/** + * drm_print_bits - print bits to a &drm_printer stream + * + * Print bits (in flag fields for example) in human readable form. + * + * @p: the &drm_printer + * @value: field value. + * @bits: Array with bit names. + * @nbits: Size of bit names array. + */ +void drm_print_bits(struct drm_printer *p, unsigned long value, + const char * const bits[], unsigned int nbits) +{ + bool first = true; + unsigned int i; + + if (WARN_ON_ONCE(nbits > BITS_PER_TYPE(value))) + nbits = BITS_PER_TYPE(value); + + for_each_set_bit(i, &value, nbits) { + if (WARN_ON_ONCE(!bits[i])) + continue; + drm_printf(p, "%s%s", first ? "" : ",", + bits[i]); + first = false; + } + if (first) + drm_printf(p, "(none)"); +} +EXPORT_SYMBOL(drm_print_bits); + void drm_dev_printk(const struct device *dev, const char *level, const char *format, ...) { @@ -206,7 +262,7 @@ void drm_dev_dbg(const struct device *dev, unsigned int category, struct va_format vaf; va_list args; - if (!(drm_debug & category)) + if (!drm_debug_enabled(category)) return; va_start(args, format); @@ -229,7 +285,7 @@ void drm_dbg(unsigned int category, const char *format, ...) 
struct va_format vaf; va_list args; - if (!(drm_debug & category)) + if (!drm_debug_enabled(category)) return; va_start(args, format); diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c index ef2c468205a2..a7c87abe88d0 100644 --- a/drivers/gpu/drm/drm_probe_helper.c +++ b/drivers/gpu/drm/drm_probe_helper.c @@ -32,6 +32,7 @@ #include <linux/export.h> #include <linux/moduleparam.h> +#include <drm/drm_bridge.h> #include <drm/drm_client.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> @@ -92,7 +93,6 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode, struct drm_device *dev = connector->dev; enum drm_mode_status ret = MODE_OK; struct drm_encoder *encoder; - int i; /* Step 1: Validate against connector */ ret = drm_connector_mode_valid(connector, mode); @@ -100,7 +100,7 @@ drm_mode_validate_pipeline(struct drm_display_mode *mode, return ret; /* Step 2: Validate against encoders and crtcs */ - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { struct drm_crtc *crtc; ret = drm_encoder_mode_valid(encoder, mode); diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c index b11910f14c46..15fb516ae2d8 100644 --- a/drivers/gpu/drm/drm_simple_kms_helper.c +++ b/drivers/gpu/drm/drm_simple_kms_helper.c @@ -8,6 +8,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_simple_kms_helper.h> @@ -42,7 +43,7 @@ drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc, /* Anything goes */ return MODE_OK; - return pipe->funcs->mode_valid(crtc, mode); + return pipe->funcs->mode_valid(pipe, mode); } static int drm_simple_kms_crtc_check(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 4b5c7b0ed714..669c93fe2500 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -135,6 +135,7 @@ #include <drm/drm_gem.h> #include <drm/drm_print.h> #include <drm/drm_syncobj.h> +#include <drm/drm_utils.h> #include "drm_internal.h" @@ -1279,7 +1280,7 @@ drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) return -EOPNOTSUPP; - if (args->pad != 0) + if (args->flags != 0) return -EINVAL; if (args->count_handles == 0) @@ -1350,7 +1351,7 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data, if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE)) return -EOPNOTSUPP; - if (args->pad != 0) + if (args->flags & ~DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) return -EINVAL; if (args->count_handles == 0) @@ -1371,25 +1372,32 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data, fence = drm_syncobj_fence_get(syncobjs[i]); chain = to_dma_fence_chain(fence); if (chain) { - struct dma_fence *iter, *last_signaled = NULL; - - dma_fence_chain_for_each(iter, fence) { - if (iter->context != fence->context) { - dma_fence_put(iter); - /* It is most likely that timeline has - * unorder points. */ - break; + struct dma_fence *iter, *last_signaled = + dma_fence_get(fence); + + if (args->flags & + DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED) { + point = fence->seqno; + } else { + dma_fence_chain_for_each(iter, fence) { + if (iter->context != fence->context) { + dma_fence_put(iter); + /* It is most likely that timeline has + * unorder points. 
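The drm_print_bits() helper added to drm_print.c above decodes flag words for debugfs and state dumps. A small sketch of how a caller might use it; the capability names are made up for illustration:

	static const char * const my_cap_names[] = {
		"scanout", "cursor", "async-flip",	/* hypothetical bit names */
	};

	static void my_print_caps(struct drm_printer *p, unsigned long caps)
	{
		drm_printf(p, "caps: ");
		drm_print_bits(p, caps, my_cap_names, ARRAY_SIZE(my_cap_names));
		drm_printf(p, "\n");
	}
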
*/ + break; + } + dma_fence_put(last_signaled); + last_signaled = dma_fence_get(iter); } - dma_fence_put(last_signaled); - last_signaled = dma_fence_get(iter); + point = dma_fence_is_signaled(last_signaled) ? + last_signaled->seqno : + to_dma_fence_chain(last_signaled)->prev_seqno; } - point = dma_fence_is_signaled(last_signaled) ? - last_signaled->seqno : - to_dma_fence_chain(last_signaled)->prev_seqno; dma_fence_put(last_signaled); } else { point = 0; } + dma_fence_put(fence); ret = copy_to_user(&points[i], &point, sizeof(uint64_t)); ret = ret ? -EFAULT : 0; if (ret) diff --git a/drivers/gpu/drm/drm_trace.h b/drivers/gpu/drm/drm_trace.h index 471eb927474b..11c6dd577e8e 100644 --- a/drivers/gpu/drm/drm_trace.h +++ b/drivers/gpu/drm/drm_trace.h @@ -13,17 +13,23 @@ struct drm_file; #define TRACE_INCLUDE_FILE drm_trace TRACE_EVENT(drm_vblank_event, - TP_PROTO(int crtc, unsigned int seq), - TP_ARGS(crtc, seq), + TP_PROTO(int crtc, unsigned int seq, ktime_t time, bool high_prec), + TP_ARGS(crtc, seq, time, high_prec), TP_STRUCT__entry( __field(int, crtc) __field(unsigned int, seq) + __field(ktime_t, time) + __field(bool, high_prec) ), TP_fast_assign( __entry->crtc = crtc; __entry->seq = seq; - ), - TP_printk("crtc=%d, seq=%u", __entry->crtc, __entry->seq) + __entry->time = time; + __entry->high_prec = high_prec; + ), + TP_printk("crtc=%d, seq=%u, time=%lld, high-prec=%s", + __entry->crtc, __entry->seq, __entry->time, + __entry->high_prec ? "true" : "false") ); TRACE_EVENT(drm_vblank_event_queued, diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index fd1fbc77871f..1659b13b178c 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -106,7 +106,7 @@ static void store_vblank(struct drm_device *dev, unsigned int pipe, write_seqlock(&vblank->seqlock); vblank->time = t_vblank; - vblank->count += vblank_count_inc; + atomic64_add(vblank_count_inc, &vblank->count); write_sequnlock(&vblank->seqlock); } @@ -272,7 +272,8 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, DRM_DEBUG_VBL("updating vblank count on crtc %u:" " current=%llu, diff=%u, hw=%u hw_last=%u\n", - pipe, vblank->count, diff, cur_vblank, vblank->last); + pipe, atomic64_read(&vblank->count), diff, + cur_vblank, vblank->last); if (diff == 0) { WARN_ON_ONCE(cur_vblank != vblank->last); @@ -294,11 +295,23 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, static u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + u64 count; if (WARN_ON(pipe >= dev->num_crtcs)) return 0; - return vblank->count; + count = atomic64_read(&vblank->count); + + /* + * This read barrier corresponds to the implicit write barrier of the + * write seqlock in store_vblank(). Note that this is the only place + * where we need an explicit barrier, since all other access goes + * through drm_vblank_count_and_time(), which already has the required + * read barrier curtesy of the read seqlock. 
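The syncobj query rewrite above replaces the old pad field with flags, and DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED lets userspace read the last submitted point rather than the last signaled one. A rough userspace sketch, assuming libdrm's drmIoctl() and the uapi names used in the hunk:

	#include <stdint.h>
	#include <xf86drm.h>

	/* Query the last *submitted* timeline point of one syncobj handle. */
	static int query_last_submitted(int fd, uint32_t handle, uint64_t *point)
	{
		struct drm_syncobj_timeline_array args = {
			.handles = (uintptr_t)&handle,
			.points = (uintptr_t)point,
			.count_handles = 1,
			.flags = DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED,
		};

		return drmIoctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &args);
	}
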
+ */ + smp_rmb(); + + return count; } /** @@ -319,7 +332,7 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc) u64 vblank; unsigned long flags; - WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp, + WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !dev->driver->get_vblank_timestamp, "This function requires support for accurate vblank timestamps."); spin_lock_irqsave(&dev->vblank_time_lock, flags); @@ -693,7 +706,7 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, */ *vblank_time = ktime_sub_ns(etime, delta_ns); - if ((drm_debug & DRM_UT_VBL) == 0) + if (!drm_debug_enabled(DRM_UT_VBL)) return true; ts_etime = ktime_to_timespec64(etime); @@ -763,6 +776,14 @@ drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe, * vblank interrupt (since it only reports the software vblank counter), see * drm_crtc_accurate_vblank_count() for such use-cases. * + * Note that for a given vblank counter value drm_crtc_handle_vblank() + * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time() + * provide a barrier: Any writes done before calling + * drm_crtc_handle_vblank() will be visible to callers of the later + * functions, iff the vblank count is the same or a later one. + * + * See also &drm_vblank_crtc.count. + * * Returns: * The software vblank counter. */ @@ -800,7 +821,7 @@ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, do { seq = read_seqbegin(&vblank->seqlock); - vblank_count = vblank->count; + vblank_count = atomic64_read(&vblank->count); *vblanktime = vblank->time; } while (read_seqretry(&vblank->seqlock, seq)); @@ -817,6 +838,14 @@ static u64 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, * vblank events since the system was booted, including lost events due to * modesetting activity. Returns corresponding system timestamp of the time * of the vblank interval that corresponds to the current vblank counter value. + * + * Note that for a given vblank counter value drm_crtc_handle_vblank() + * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time() + * provide a barrier: Any writes done before calling + * drm_crtc_handle_vblank() will be visible to callers of the later + * functions, iff the vblank count is the same or a later one. + * + * See also &drm_vblank_crtc.count. */ u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, ktime_t *vblanktime) @@ -1323,7 +1352,7 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe) assert_spin_locked(&dev->vblank_time_lock); vblank = &dev->vblank[pipe]; - WARN_ONCE((drm_debug & DRM_UT_VBL) && !vblank->framedur_ns, + WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !vblank->framedur_ns, "Cannot compute missed vblanks without frame duration\n"); framedur_ns = vblank->framedur_ns; @@ -1581,7 +1610,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data, unsigned int flags, pipe, high_pipe; if (!dev->irq_enabled) - return -EINVAL; + return -EOPNOTSUPP; if (vblwait->request.type & _DRM_VBLANK_SIGNAL) return -EINVAL; @@ -1731,7 +1760,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe) send_vblank_event(dev, e, seq, now); } - trace_drm_vblank_event(pipe, seq); + trace_drm_vblank_event(pipe, seq, now, + dev->driver->get_vblank_timestamp != NULL); } /** @@ -1806,6 +1836,14 @@ EXPORT_SYMBOL(drm_handle_vblank); * * This is the native KMS version of drm_handle_vblank(). 
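The documentation being added here is about ordering: drm_crtc_handle_vblank() in the interrupt handler publishes the new count, and readers that go through drm_crtc_vblank_count_and_time() see everything written before it as long as they observe the same or a later count. A sketch of the consumer side, with my_flip_complete() standing in for driver code:

	static u64 my_flip_complete(struct drm_crtc *crtc, ktime_t *vbl_time)
	{
		/*
		 * Pairs with drm_crtc_handle_vblank() in the vblank IRQ
		 * handler; writes done before that call are visible here
		 * when the returned count is the same or newer.
		 */
		return drm_crtc_vblank_count_and_time(crtc, vbl_time);
	}
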
* + * Note that for a given vblank counter value drm_crtc_handle_vblank() + * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time() + * provide a barrier: Any writes done before calling + * drm_crtc_handle_vblank() will be visible to callers of the later + * functions, iff the vblank count is the same or a later one. + * + * See also &drm_vblank_crtc.count. + * * Returns: * True if the event was successfully handled, false on failure. */ @@ -1838,7 +1876,7 @@ int drm_crtc_get_sequence_ioctl(struct drm_device *dev, void *data, return -EOPNOTSUPP; if (!dev->irq_enabled) - return -EINVAL; + return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, get_seq->crtc_id); if (!crtc) @@ -1896,7 +1934,7 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data, return -EOPNOTSUPP; if (!dev->irq_enabled) - return -EINVAL; + return -EOPNOTSUPP; crtc = drm_crtc_find(dev, file_priv, queue_seq->crtc_id); if (!crtc) diff --git a/drivers/gpu/drm/drm_vram_helper_common.c b/drivers/gpu/drm/drm_vram_helper_common.c index e9c9f9a80ba3..2000d9b33fd5 100644 --- a/drivers/gpu/drm/drm_vram_helper_common.c +++ b/drivers/gpu/drm/drm_vram_helper_common.c @@ -7,9 +7,8 @@ * * This library provides &struct drm_gem_vram_object (GEM VRAM), a GEM * buffer object that is backed by video RAM. It can be used for - * framebuffer devices with dedicated memory. The video RAM can be - * managed with &struct drm_vram_mm (VRAM MM). Both data structures are - * supposed to be used together, but can also be used individually. + * framebuffer devices with dedicated memory. The video RAM is managed + * by &struct drm_vram_mm (VRAM MM). * * With the GEM interface userspace applications create, manage and destroy * graphics buffers, such as an on-screen framebuffer. GEM does not provide @@ -50,8 +49,7 @@ * // setup device, vram base and size * // ... * - * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size, - * &drm_gem_vram_mm_funcs); + * ret = drm_vram_helper_alloc_mm(dev, vram_base, vram_size); * if (ret) * return ret; * return 0; diff --git a/drivers/gpu/drm/drm_vram_mm_helper.c b/drivers/gpu/drm/drm_vram_mm_helper.c deleted file mode 100644 index c911781d6728..000000000000 --- a/drivers/gpu/drm/drm_vram_mm_helper.c +++ /dev/null @@ -1,297 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later - -#include <drm/drm_device.h> -#include <drm/drm_file.h> -#include <drm/drm_vram_mm_helper.h> - -#include <drm/ttm/ttm_page_alloc.h> - -/** - * DOC: overview - * - * The data structure &struct drm_vram_mm and its helpers implement a memory - * manager for simple framebuffer devices with dedicated video memory. Buffer - * objects are either placed in video RAM or evicted to system memory. These - * helper functions work well with &struct drm_gem_vram_object. 
- */ - -/* - * TTM TT - */ - -static void backend_func_destroy(struct ttm_tt *tt) -{ - ttm_tt_fini(tt); - kfree(tt); -} - -static struct ttm_backend_func backend_func = { - .destroy = backend_func_destroy -}; - -/* - * TTM BO device - */ - -static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo, - uint32_t page_flags) -{ - struct ttm_tt *tt; - int ret; - - tt = kzalloc(sizeof(*tt), GFP_KERNEL); - if (!tt) - return NULL; - - tt->func = &backend_func; - - ret = ttm_tt_init(tt, bo, page_flags); - if (ret < 0) - goto err_ttm_tt_init; - - return tt; - -err_ttm_tt_init: - kfree(tt); - return NULL; -} - -static int bo_driver_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) -{ - switch (type) { - case TTM_PL_SYSTEM: - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - break; - case TTM_PL_VRAM: - man->func = &ttm_bo_manager_func; - man->flags = TTM_MEMTYPE_FLAG_FIXED | - TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_FLAG_UNCACHED | - TTM_PL_FLAG_WC; - man->default_caching = TTM_PL_FLAG_WC; - break; - default: - return -EINVAL; - } - return 0; -} - -static void bo_driver_evict_flags(struct ttm_buffer_object *bo, - struct ttm_placement *placement) -{ - struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev); - - if (vmm->funcs && vmm->funcs->evict_flags) - vmm->funcs->evict_flags(bo, placement); -} - -static int bo_driver_verify_access(struct ttm_buffer_object *bo, - struct file *filp) -{ - struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bo->bdev); - - if (!vmm->funcs || !vmm->funcs->verify_access) - return 0; - return vmm->funcs->verify_access(bo, filp); -} - -static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) -{ - struct ttm_mem_type_manager *man = bdev->man + mem->mem_type; - struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev); - - if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) - return -EINVAL; - - mem->bus.addr = NULL; - mem->bus.size = mem->num_pages << PAGE_SHIFT; - - switch (mem->mem_type) { - case TTM_PL_SYSTEM: /* nothing to do */ - mem->bus.offset = 0; - mem->bus.base = 0; - mem->bus.is_iomem = false; - break; - case TTM_PL_VRAM: - mem->bus.offset = mem->start << PAGE_SHIFT; - mem->bus.base = vmm->vram_base; - mem->bus.is_iomem = true; - break; - default: - return -EINVAL; - } - - return 0; -} - -static void bo_driver_io_mem_free(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) -{ } - -static struct ttm_bo_driver bo_driver = { - .ttm_tt_create = bo_driver_ttm_tt_create, - .ttm_tt_populate = ttm_pool_populate, - .ttm_tt_unpopulate = ttm_pool_unpopulate, - .init_mem_type = bo_driver_init_mem_type, - .eviction_valuable = ttm_bo_eviction_valuable, - .evict_flags = bo_driver_evict_flags, - .verify_access = bo_driver_verify_access, - .io_mem_reserve = bo_driver_io_mem_reserve, - .io_mem_free = bo_driver_io_mem_free, -}; - -/* - * struct drm_vram_mm - */ - -/** - * drm_vram_mm_init() - Initialize an instance of VRAM MM. - * @vmm: the VRAM MM instance to initialize - * @dev: the DRM device - * @vram_base: the base address of the video memory - * @vram_size: the size of the video memory in bytes - * @funcs: callback functions for buffer objects - * - * Returns: - * 0 on success, or - * a negative error code otherwise. 
- */ -int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev, - uint64_t vram_base, size_t vram_size, - const struct drm_vram_mm_funcs *funcs) -{ - int ret; - - vmm->vram_base = vram_base; - vmm->vram_size = vram_size; - vmm->funcs = funcs; - - ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, - dev->anon_inode->i_mapping, - true); - if (ret) - return ret; - - ret = ttm_bo_init_mm(&vmm->bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT); - if (ret) - return ret; - - return 0; -} -EXPORT_SYMBOL(drm_vram_mm_init); - -/** - * drm_vram_mm_cleanup() - Cleans up an initialized instance of VRAM MM. - * @vmm: the VRAM MM instance to clean up - */ -void drm_vram_mm_cleanup(struct drm_vram_mm *vmm) -{ - ttm_bo_device_release(&vmm->bdev); -} -EXPORT_SYMBOL(drm_vram_mm_cleanup); - -/** - * drm_vram_mm_mmap() - Helper for implementing &struct file_operations.mmap() - * @filp: the mapping's file structure - * @vma: the mapping's memory area - * @vmm: the VRAM MM instance - * - * Returns: - * 0 on success, or - * a negative error code otherwise. - */ -int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma, - struct drm_vram_mm *vmm) -{ - return ttm_bo_mmap(filp, vma, &vmm->bdev); -} -EXPORT_SYMBOL(drm_vram_mm_mmap); - -/* - * Helpers for integration with struct drm_device - */ - -/** - * drm_vram_helper_alloc_mm - Allocates a device's instance of \ - &struct drm_vram_mm - * @dev: the DRM device - * @vram_base: the base address of the video memory - * @vram_size: the size of the video memory in bytes - * @funcs: callback functions for buffer objects - * - * Returns: - * The new instance of &struct drm_vram_mm on success, or - * an ERR_PTR()-encoded errno code otherwise. - */ -struct drm_vram_mm *drm_vram_helper_alloc_mm( - struct drm_device *dev, uint64_t vram_base, size_t vram_size, - const struct drm_vram_mm_funcs *funcs) -{ - int ret; - - if (WARN_ON(dev->vram_mm)) - return dev->vram_mm; - - dev->vram_mm = kzalloc(sizeof(*dev->vram_mm), GFP_KERNEL); - if (!dev->vram_mm) - return ERR_PTR(-ENOMEM); - - ret = drm_vram_mm_init(dev->vram_mm, dev, vram_base, vram_size, funcs); - if (ret) - goto err_kfree; - - return dev->vram_mm; - -err_kfree: - kfree(dev->vram_mm); - dev->vram_mm = NULL; - return ERR_PTR(ret); -} -EXPORT_SYMBOL(drm_vram_helper_alloc_mm); - -/** - * drm_vram_helper_release_mm - Releases a device's instance of \ - &struct drm_vram_mm - * @dev: the DRM device - */ -void drm_vram_helper_release_mm(struct drm_device *dev) -{ - if (!dev->vram_mm) - return; - - drm_vram_mm_cleanup(dev->vram_mm); - kfree(dev->vram_mm); - dev->vram_mm = NULL; -} -EXPORT_SYMBOL(drm_vram_helper_release_mm); - -/* - * Helpers for &struct file_operations - */ - -/** - * drm_vram_mm_file_operations_mmap() - \ - Implements &struct file_operations.mmap() - * @filp: the mapping's file structure - * @vma: the mapping's memory area - * - * Returns: - * 0 on success, or - * a negative error code otherwise. 
- */ -int drm_vram_mm_file_operations_mmap( - struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *file_priv = filp->private_data; - struct drm_device *dev = file_priv->minor->dev; - - if (WARN_ONCE(!dev->vram_mm, "VRAM MM not initialized")) - return -EINVAL; - - return drm_vram_mm_mmap(filp, vma, dev->vram_mm); -} -EXPORT_SYMBOL(drm_vram_mm_file_operations_mmap); diff --git a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c index 7e4e2959bf4f..32d9fac587f9 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_buffer.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_buffer.c @@ -326,7 +326,7 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, lockdep_assert_held(&gpu->lock); - if (drm_debug & DRM_UT_DRIVER) + if (drm_debug_enabled(DRM_UT_DRIVER)) etnaviv_buffer_dump(gpu, buffer, 0, 0x50); link_target = etnaviv_cmdbuf_get_va(cmdbuf, @@ -459,13 +459,13 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, etnaviv_cmdbuf_get_va(buffer, &gpu->mmu_context->cmdbuf_mapping) + buffer->user_size - 4); - if (drm_debug & DRM_UT_DRIVER) + if (drm_debug_enabled(DRM_UT_DRIVER)) pr_info("stream link to 0x%08x @ 0x%08x %p\n", return_target, etnaviv_cmdbuf_get_va(cmdbuf, &gpu->mmu_context->cmdbuf_mapping), cmdbuf->vaddr); - if (drm_debug & DRM_UT_DRIVER) { + if (drm_debug_enabled(DRM_UT_DRIVER)) { print_hex_dump(KERN_INFO, "cmd ", DUMP_PREFIX_OFFSET, 16, 4, cmdbuf->vaddr, cmdbuf->size, 0); @@ -484,6 +484,6 @@ void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state, VIV_FE_LINK_HEADER_PREFETCH(link_dwords), link_target); - if (drm_debug & DRM_UT_DRIVER) + if (drm_debug_enabled(DRM_UT_DRIVER)) etnaviv_buffer_dump(gpu, buffer, 0, 0x50); } diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 3a0f0ba8c63a..1e6aa24bf45e 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -19,6 +19,7 @@ #include <drm/bridge/analogix_dp.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c index 6926cee91b36..72726f2c7a9f 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c +++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c @@ -24,6 +24,7 @@ #include <video/videomode.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_fb_helper.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_panel.h> diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index b78e8c5ba553..f41d75923557 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -21,6 +21,7 @@ #include <video/of_videomode.h> #include <video/videomode.h> +#include <drm/drm_bridge.h> #include <drm/drm_encoder.h> #include <drm/drm_print.h> diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index bc1565f1822a..48159d5d2214 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -34,6 +34,7 @@ #include <media/cec-notifier.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> @@ -852,6 +853,10 @@ static enum drm_connector_status hdmi_detect(struct drm_connector *connector, static void hdmi_connector_destroy(struct drm_connector *connector) { + struct hdmi_context 
*hdata = connector_to_hdmi(connector); + + cec_notifier_conn_unregister(hdata->notifier); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -935,6 +940,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder) { struct hdmi_context *hdata = encoder_to_hdmi(encoder); struct drm_connector *connector = &hdata->connector; + struct cec_connector_info conn_info; int ret; connector->interlace_allowed = true; @@ -957,6 +963,15 @@ static int hdmi_create_connector(struct drm_encoder *encoder) DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n"); } + cec_fill_conn_info_from_drm(&conn_info, connector); + + hdata->notifier = cec_notifier_conn_register(hdata->dev, NULL, + &conn_info); + if (!hdata->notifier) { + ret = -ENOMEM; + DRM_DEV_ERROR(hdata->dev, "Failed to allocate CEC notifier\n"); + } + return ret; } @@ -1528,8 +1543,8 @@ static void hdmi_disable(struct drm_encoder *encoder) */ mutex_unlock(&hdata->mutex); cancel_delayed_work(&hdata->hotplug_work); - cec_notifier_set_phys_addr(hdata->notifier, - CEC_PHYS_ADDR_INVALID); + if (hdata->notifier) + cec_notifier_phys_addr_invalidate(hdata->notifier); return; } @@ -2006,12 +2021,6 @@ static int hdmi_probe(struct platform_device *pdev) } } - hdata->notifier = cec_notifier_get(&pdev->dev); - if (hdata->notifier == NULL) { - ret = -ENOMEM; - goto err_hdmiphy; - } - pm_runtime_enable(dev); audio_infoframe = &hdata->audio.infoframe; @@ -2023,7 +2032,7 @@ static int hdmi_probe(struct platform_device *pdev) ret = hdmi_register_audio_device(hdata); if (ret) - goto err_notifier_put; + goto err_rpm_disable; ret = component_add(&pdev->dev, &hdmi_component_ops); if (ret) @@ -2034,8 +2043,7 @@ static int hdmi_probe(struct platform_device *pdev) err_unregister_audio: platform_device_unregister(hdata->audio.pdev); -err_notifier_put: - cec_notifier_put(hdata->notifier); +err_rpm_disable: pm_runtime_disable(dev); err_hdmiphy: @@ -2054,12 +2062,10 @@ static int hdmi_remove(struct platform_device *pdev) struct hdmi_context *hdata = platform_get_drvdata(pdev); cancel_delayed_work_sync(&hdata->hotplug_work); - cec_notifier_set_phys_addr(hdata->notifier, CEC_PHYS_ADDR_INVALID); component_del(&pdev->dev, &hdmi_component_ops); platform_device_unregister(hdata->audio.pdev); - cec_notifier_put(hdata->notifier); pm_runtime_disable(&pdev->dev); if (!IS_ERR(hdata->reg_hdmi_en)) diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 7b24338fad3c..6cfdb95fef2f 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1069,9 +1069,9 @@ static bool mixer_mode_fixup(struct exynos_drm_crtc *crtc, struct mixer_context *ctx = crtc->ctx; int width = mode->hdisplay, height = mode->vdisplay, i; - struct { + static const struct { int hdisplay, vdisplay, htotal, vtotal, scan_val; - } static const modes[] = { + } modes[] = { { 720, 480, 858, 525, MXR_CFG_SCAN_NTSC | MXR_CFG_SCAN_SD }, { 720, 576, 864, 625, MXR_CFG_SCAN_PAL | MXR_CFG_SCAN_SD }, { 1280, 720, 1650, 750, MXR_CFG_SCAN_HD_720 | MXR_CFG_SCAN_HD }, diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c index a92fd6c70b09..82c972e9c024 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c @@ -9,6 +9,7 @@ #include <linux/of_graph.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_probe_helper.h> diff --git 
a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c index f56852a503e8..8b784947ed3b 100644 --- a/drivers/gpu/drm/gma500/cdv_intel_display.c +++ b/drivers/gpu/drm/gma500/cdv_intel_display.c @@ -405,6 +405,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit, struct gma_crtc *gma_crtc = to_gma_crtc(crtc); struct gma_clock_t clock; + memset(&clock, 0, sizeof(clock)); + switch (refclk) { case 27000: if (target < 200000) { diff --git a/drivers/gpu/drm/gma500/mdfld_dsi_output.c b/drivers/gpu/drm/gma500/mdfld_dsi_output.c index 03023fa0fb6f..f350ac1ead18 100644 --- a/drivers/gpu/drm/gma500/mdfld_dsi_output.c +++ b/drivers/gpu/drm/gma500/mdfld_dsi_output.c @@ -498,7 +498,7 @@ void mdfld_dsi_output_init(struct drm_device *dev, return; } - /*create a new connetor*/ + /*create a new connector*/ dsi_connector = kzalloc(sizeof(struct mdfld_dsi_connector), GFP_KERNEL); if (!dsi_connector) { DRM_ERROR("No memory"); diff --git a/drivers/gpu/drm/gma500/oaktrail_crtc.c b/drivers/gpu/drm/gma500/oaktrail_crtc.c index 167c10767dd4..900e5499249d 100644 --- a/drivers/gpu/drm/gma500/oaktrail_crtc.c +++ b/drivers/gpu/drm/gma500/oaktrail_crtc.c @@ -129,6 +129,7 @@ static bool mrst_sdvo_find_best_pll(const struct gma_limit_t *limit, s32 freq_error, min_error = 100000; memset(best_clock, 0, sizeof(*best_clock)); + memset(&clock, 0, sizeof(clock)); for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) { for (clock.n = limit->n.min; clock.n <= limit->n.max; @@ -185,6 +186,7 @@ static bool mrst_lvds_find_best_pll(const struct gma_limit_t *limit, int err = target; memset(best_clock, 0, sizeof(*best_clock)); + memset(&clock, 0, sizeof(clock)); for (clock.m = limit->m.min; clock.m <= limit->m.max; clock.m++) { for (clock.p1 = limit->p1.min; clock.p1 <= limit->p1.max; diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig index 35a3c5f0c38c..dfc5aef62f7b 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig +++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig @@ -4,7 +4,8 @@ config DRM_HISI_HIBMC depends on DRM && PCI && MMU && ARM64 select DRM_KMS_HELPER select DRM_VRAM_HELPER - + select DRM_TTM + select DRM_TTM_HELPER help Choose this option if you have a Hisilicon Hibmc soc chipset. If M is selected the module will be called hibmc-drm. diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c index cc4c41748cfb..6527a97f68a3 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c @@ -96,7 +96,6 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane, { struct drm_plane_state *state = plane->state; u32 reg; - int ret; s64 gpu_addr = 0; unsigned int line_l; struct hibmc_drm_private *priv = plane->dev->dev_private; @@ -109,16 +108,9 @@ static void hibmc_plane_atomic_update(struct drm_plane *plane, hibmc_fb = to_hibmc_framebuffer(state->fb); gbo = drm_gem_vram_of_gem(hibmc_fb->obj); - ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); - if (ret) { - DRM_ERROR("failed to pin bo: %d", ret); - return; - } gpu_addr = drm_gem_vram_offset(gbo); - if (gpu_addr < 0) { - drm_gem_vram_unpin(gbo); - return; - } + if (WARN_ON_ONCE(gpu_addr < 0)) + return; /* Bug: we didn't pin the BO to VRAM in prepare_fb. 
*/ writel(gpu_addr, priv->mmio + HIBMC_CRT_FB_ADDRESS); @@ -157,6 +149,8 @@ static struct drm_plane_funcs hibmc_plane_funcs = { }; static const struct drm_plane_helper_funcs hibmc_plane_helper_funcs = { + .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, + .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb, .atomic_check = hibmc_plane_atomic_check, .atomic_update = hibmc_plane_atomic_update, }; diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c index c103005b0a33..2fd4ca91a62d 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c @@ -22,15 +22,11 @@ #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> -#include <drm/drm_vram_mm_helper.h> #include "hibmc_drm_drv.h" #include "hibmc_drm_regs.h" -static const struct file_operations hibmc_fops = { - .owner = THIS_MODULE, - DRM_VRAM_MM_FILE_OPERATIONS -}; +DEFINE_DRM_GEM_FOPS(hibmc_fops); static irqreturn_t hibmc_drm_interrupt(int irq, void *arg) { diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c index 9f6e473e6295..21b684eab5c9 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c +++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c @@ -17,7 +17,6 @@ #include <drm/drm_gem.h> #include <drm/drm_gem_vram_helper.h> #include <drm/drm_print.h> -#include <drm/drm_vram_mm_helper.h> #include "hibmc_drm_drv.h" @@ -29,7 +28,7 @@ int hibmc_mm_init(struct hibmc_drm_private *hibmc) vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0), - hibmc->fb_size, &drm_gem_vram_mm_funcs); + hibmc->fb_size); if (IS_ERR(vmm)) { ret = PTR_ERR(vmm); DRM_ERROR("Error initializing VRAM MM; %d\n", ret); diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c index 5bf8138941de..bdcf9c6ae9e9 100644 --- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c +++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c @@ -18,6 +18,7 @@ #include <linux/platform_device.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_device.h> #include <drm/drm_encoder_slave.h> #include <drm/drm_mipi_dsi.h> diff --git a/drivers/gpu/drm/i2c/sil164_drv.c b/drivers/gpu/drm/i2c/sil164_drv.c index 8bcf0d199145..a839f78a4c8a 100644 --- a/drivers/gpu/drm/i2c/sil164_drv.c +++ b/drivers/gpu/drm/i2c/sil164_drv.c @@ -44,7 +44,7 @@ struct sil164_priv { ((struct sil164_priv *)to_encoder_slave(x)->slave_priv) #define sil164_dbg(client, format, ...) 
do { \ - if (drm_debug & DRM_UT_KMS) \ + if (drm_debug_enabled(DRM_UT_KMS)) \ dev_printk(KERN_DEBUG, &client->dev, \ "%s: " format, __func__, ## __VA_ARGS__); \ } while (0) diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c index 8039fc0d83db..5b03fdd1eaa4 100644 --- a/drivers/gpu/drm/i2c/tda9950.c +++ b/drivers/gpu/drm/i2c/tda9950.c @@ -420,7 +420,8 @@ static int tda9950_probe(struct i2c_client *client, priv->hdmi = glue->parent; priv->adap = cec_allocate_adapter(&tda9950_cec_ops, priv, "tda9950", - CEC_CAP_DEFAULTS, + CEC_CAP_DEFAULTS | + CEC_CAP_CONNECTOR_INFO, CEC_MAX_LOG_ADDRS); if (IS_ERR(priv->adap)) return PTR_ERR(priv->adap); @@ -457,13 +458,14 @@ static int tda9950_probe(struct i2c_client *client, if (ret < 0) return ret; - priv->notify = cec_notifier_get(priv->hdmi); + priv->notify = cec_notifier_cec_adap_register(priv->hdmi, NULL, + priv->adap); if (!priv->notify) return -ENOMEM; ret = cec_register_adapter(priv->adap, priv->hdmi); if (ret < 0) { - cec_notifier_put(priv->notify); + cec_notifier_cec_adap_unregister(priv->notify, priv->adap); return ret; } @@ -473,8 +475,6 @@ static int tda9950_probe(struct i2c_client *client, */ devm_remove_action(dev, tda9950_cec_del, priv); - cec_register_cec_notifier(priv->adap, priv->notify); - return 0; } @@ -482,8 +482,8 @@ static int tda9950_remove(struct i2c_client *client) { struct tda9950_priv *priv = i2c_get_clientdata(client); + cec_notifier_cec_adap_unregister(priv->notify, priv->adap); cec_unregister_adapter(priv->adap); - cec_notifier_put(priv->notify); return 0; } diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c index 84c6d4c91c65..a63790d32d75 100644 --- a/drivers/gpu/drm/i2c/tda998x_drv.c +++ b/drivers/gpu/drm/i2c/tda998x_drv.c @@ -14,6 +14,7 @@ #include <sound/hdmi-codec.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_of.h> #include <drm/drm_print.h> @@ -805,8 +806,8 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data) tda998x_edid_delay_start(priv); } else { schedule_work(&priv->detect_work); - cec_notifier_set_phys_addr(priv->cec_notify, - CEC_PHYS_ADDR_INVALID); + cec_notifier_phys_addr_invalidate( + priv->cec_notify); } handled = true; @@ -1790,8 +1791,7 @@ static void tda998x_destroy(struct device *dev) i2c_unregister_device(priv->cec); - if (priv->cec_notify) - cec_notifier_put(priv->cec_notify); + cec_notifier_conn_unregister(priv->cec_notify); } static int tda998x_create(struct device *dev) @@ -1916,7 +1916,7 @@ static int tda998x_create(struct device *dev) cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD); } - priv->cec_notify = cec_notifier_get(dev); + priv->cec_notify = cec_notifier_conn_register(dev, NULL, NULL); if (!priv->cec_notify) { ret = -ENOMEM; goto fail; diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c index 2a77823b8e9a..e66c38332df4 100644 --- a/drivers/gpu/drm/i810/i810_dma.c +++ b/drivers/gpu/drm/i810/i810_dma.c @@ -728,7 +728,7 @@ static void i810_dma_dispatch_vertex(struct drm_device *dev, if (nbox > I810_NR_SAREA_CLIPRECTS) nbox = I810_NR_SAREA_CLIPRECTS; - if (used > 4 * 1024) + if (used < 0 || used > 4 * 1024) used = 0; if (sarea_priv->dirty) @@ -1048,7 +1048,7 @@ static void i810_dma_dispatch_mc(struct drm_device *dev, struct drm_buf *buf, in if (u != I810_BUF_CLIENT) DRM_DEBUG("MC found buffer that isn't mine!\n"); - if (used > 4 * 1024) + if (used < 0 || used > 4 * 1024) used = 0; sarea_priv->dirty = 0x7f; diff --git 
a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig index 0d21402945ab..ba9595960bbe 100644 --- a/drivers/gpu/drm/i915/Kconfig +++ b/drivers/gpu/drm/i915/Kconfig @@ -76,7 +76,7 @@ config DRM_I915_CAPTURE_ERROR This option enables capturing the GPU state when a hang is detected. This information is vital for triaging hangs and assists in debugging. Please report any hang to - https://bugs.freedesktop.org/enter_bug.cgi?product=DRI + https://bugs.freedesktop.org/enter_bug.cgi?product=DRI for triaging. If in doubt, say "Y". @@ -105,11 +105,11 @@ config DRM_I915_USERPTR If in doubt, say "Y". config DRM_I915_GVT - bool "Enable Intel GVT-g graphics virtualization host support" - depends on DRM_I915 - depends on 64BIT - default n - help + bool "Enable Intel GVT-g graphics virtualization host support" + depends on DRM_I915 + depends on 64BIT + default n + help Choose this option if you want to enable Intel GVT-g graphics virtualization technology host support with integrated graphics. With GVT-g, it's possible to have one integrated graphics @@ -148,3 +148,9 @@ menu "drm/i915 Profile Guided Optimisation" depends on DRM_I915 source "drivers/gpu/drm/i915/Kconfig.profile" endmenu + +menu "drm/i915 Unstable Evolution" + visible if EXPERT && STAGING && BROKEN + depends on DRM_I915 + source "drivers/gpu/drm/i915/Kconfig.unstable" +endmenu diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 1400fce39c58..0b1f786a7ce9 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -1,33 +1,33 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_I915_WERROR - bool "Force GCC to throw an error instead of a warning when compiling" - # As this may inadvertently break the build, only allow the user - # to shoot oneself in the foot iff they aim really hard - depends on EXPERT - # We use the dependency on !COMPILE_TEST to not be enabled in - # allmodconfig or allyesconfig configurations - depends on !COMPILE_TEST + bool "Force GCC to throw an error instead of a warning when compiling" + # As this may inadvertently break the build, only allow the user + # to shoot oneself in the foot iff they aim really hard + depends on EXPERT + # We use the dependency on !COMPILE_TEST to not be enabled in + # allmodconfig or allyesconfig configurations + depends on !COMPILE_TEST select HEADER_TEST - default n - help - Add -Werror to the build flags for (and only for) i915.ko. - Do not enable this unless you are writing code for the i915.ko module. + default n + help + Add -Werror to the build flags for (and only for) i915.ko. + Do not enable this unless you are writing code for the i915.ko module. - Recommended for driver developers only. + Recommended for driver developers only. - If in doubt, say "N". + If in doubt, say "N". 
config DRM_I915_DEBUG - bool "Enable additional driver debugging" - depends on DRM_I915 - select DEBUG_FS - select PREEMPT_COUNT - select I2C_CHARDEV - select STACKDEPOT - select DRM_DP_AUX_CHARDEV - select X86_MSR # used by igt/pm_rpm - select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) - select DRM_DEBUG_MM if DRM=y + bool "Enable additional driver debugging" + depends on DRM_I915 + select DEBUG_FS + select PREEMPT_COUNT + select I2C_CHARDEV + select STACKDEPOT + select DRM_DP_AUX_CHARDEV + select X86_MSR # used by igt/pm_rpm + select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) + select DRM_DEBUG_MM if DRM=y select DRM_DEBUG_SELFTEST select DMABUF_SELFTESTS select SW_SYNC # signaling validation framework (igt/syncobj*) @@ -35,14 +35,14 @@ config DRM_I915_DEBUG select DRM_I915_SELFTEST select DRM_I915_DEBUG_RUNTIME_PM select DRM_I915_DEBUG_MMIO - default n - help - Choose this option to turn on extra driver debugging that may affect - performance but will catch some internal issues. + default n + help + Choose this option to turn on extra driver debugging that may affect + performance but will catch some internal issues. - Recommended for driver developers only. + Recommended for driver developers only. - If in doubt, say "N". + If in doubt, say "N". config DRM_I915_DEBUG_MMIO bool "Always insert extra checks around mmio access by default" @@ -58,16 +58,16 @@ config DRM_I915_DEBUG_MMIO If in doubt, say "N". config DRM_I915_DEBUG_GEM - bool "Insert extra checks into the GEM internals" - default n - depends on DRM_I915_WERROR - help - Enable extra sanity checks (including BUGs) along the GEM driver - paths that may slow the system down and if hit hang the machine. + bool "Insert extra checks into the GEM internals" + default n + depends on DRM_I915_WERROR + help + Enable extra sanity checks (including BUGs) along the GEM driver + paths that may slow the system down and if hit hang the machine. - Recommended for driver developers only. + Recommended for driver developers only. - If in doubt, say "N". + If in doubt, say "N". config DRM_I915_ERRLOG_GEM bool "Insert extra logging (very verbose) for common GEM errors" @@ -110,41 +110,41 @@ config DRM_I915_TRACE_GTT If in doubt, say "N". config DRM_I915_SW_FENCE_DEBUG_OBJECTS - bool "Enable additional driver debugging for fence objects" - depends on DRM_I915 - select DEBUG_OBJECTS - default n - help - Choose this option to turn on extra driver debugging that may affect - performance but will catch some internal issues. + bool "Enable additional driver debugging for fence objects" + depends on DRM_I915 + select DEBUG_OBJECTS + default n + help + Choose this option to turn on extra driver debugging that may affect + performance but will catch some internal issues. - Recommended for driver developers only. + Recommended for driver developers only. - If in doubt, say "N". + If in doubt, say "N". config DRM_I915_SW_FENCE_CHECK_DAG - bool "Enable additional driver debugging for detecting dependency cycles" - depends on DRM_I915 - default n - help - Choose this option to turn on extra driver debugging that may affect - performance but will catch some internal issues. + bool "Enable additional driver debugging for detecting dependency cycles" + depends on DRM_I915 + default n + help + Choose this option to turn on extra driver debugging that may affect + performance but will catch some internal issues. - Recommended for driver developers only. + Recommended for driver developers only. - If in doubt, say "N". 
+ If in doubt, say "N". config DRM_I915_DEBUG_GUC - bool "Enable additional driver debugging for GuC" - depends on DRM_I915 - default n - help - Choose this option to turn on extra driver debugging that may affect - performance but will help resolve GuC related issues. + bool "Enable additional driver debugging for GuC" + depends on DRM_I915 + default n + help + Choose this option to turn on extra driver debugging that may affect + performance but will help resolve GuC related issues. - Recommended for driver developers only. + Recommended for driver developers only. - If in doubt, say "N". + If in doubt, say "N". config DRM_I915_SELFTEST bool "Enable selftests upon driver load" @@ -177,15 +177,15 @@ config DRM_I915_SELFTEST_BROKEN If in doubt, say "N". config DRM_I915_LOW_LEVEL_TRACEPOINTS - bool "Enable low level request tracing events" - depends on DRM_I915 - default n - help - Choose this option to turn on low level request tracing events. - This provides the ability to precisely monitor engine utilisation - and also analyze the request dependency resolving timeline. - - If in doubt, say "N". + bool "Enable low level request tracing events" + depends on DRM_I915 + default n + help + Choose this option to turn on low level request tracing events. + This provides the ability to precisely monitor engine utilisation + and also analyze the request dependency resolving timeline. + + If in doubt, say "N". config DRM_I915_DEBUG_VBLANK_EVADE bool "Enable extra debug warnings for vblank evasion" diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile index 48df8889a88a..1799537a3228 100644 --- a/drivers/gpu/drm/i915/Kconfig.profile +++ b/drivers/gpu/drm/i915/Kconfig.profile @@ -12,6 +12,29 @@ config DRM_I915_USERFAULT_AUTOSUSPEND May be 0 to disable the extra delay and solely use the device level runtime pm autosuspend delay tunable. +config DRM_I915_HEARTBEAT_INTERVAL + int "Interval between heartbeat pulses (ms)" + default 2500 # milliseconds + help + The driver sends a periodic heartbeat down all active engines to + check the health of the GPU and undertake regular house-keeping of + internal driver state. + + May be 0 to disable heartbeats and therefore disable automatic GPU + hang detection. + +config DRM_I915_PREEMPT_TIMEOUT + int "Preempt timeout (ms, jiffy granularity)" + default 100 # milliseconds + help + How long to wait (in milliseconds) for a preemption event to occur + when submitting a new context via execlists. If the current context + does not hit an arbitration point and yield to HW before the timer + expires, the HW will be reset to allow the more important context + to execute. + + May be 0 to disable the timeout. + config DRM_I915_SPIN_REQUEST int "Busywait for request completion (us)" default 5 # microseconds @@ -25,3 +48,29 @@ config DRM_I915_SPIN_REQUEST May be 0 to disable the initial spin. In practice, we estimate the cost of enabling the interrupt (if currently disabled) to be a few microseconds. + +config DRM_I915_STOP_TIMEOUT + int "How long to wait for an engine to quiesce gracefully before reset (ms)" + default 100 # milliseconds + help + By stopping submission and sleeping for a short time before resetting + the GPU, we allow the innocent contexts also on the system to quiesce. + It is then less likely for a hanging context to cause collateral + damage as the system is reset in order to recover. The corollary is + that the reset itself may take longer and so be more disruptive to + interactive or low latency workloads. 
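The new Kconfig.profile entries above all follow the same convention: an integer in milliseconds that the driver converts to jiffies, with 0 meaning the feature is disabled. The heartbeat in particular is a periodic work item that re-arms itself while enabled. Below is a much simplified sketch of that pattern, assuming nothing beyond the standard workqueue API; the real implementation lands in gt/intel_engine_heartbeat.c (added to the Makefile further down) and every name here is invented for the example.

	#include <linux/kernel.h>
	#include <linux/workqueue.h>
	#include <linux/jiffies.h>

	struct demo_heartbeat {
		struct delayed_work work;
		unsigned long interval;		/* in jiffies, 0 == disabled */
	};

	static void demo_heartbeat_fn(struct work_struct *wrk)
	{
		struct demo_heartbeat *hb =
			container_of(wrk, struct demo_heartbeat, work.work);

		/* check engine health, do regular housekeeping, kick resets ... */

		if (hb->interval)		/* re-arm only while enabled */
			schedule_delayed_work(&hb->work, hb->interval);
	}

	static void demo_heartbeat_start(struct demo_heartbeat *hb, unsigned int ms)
	{
		INIT_DELAYED_WORK(&hb->work, demo_heartbeat_fn);
		hb->interval = ms ? msecs_to_jiffies(ms) : 0;
		if (hb->interval)
			schedule_delayed_work(&hb->work, hb->interval);
	}

The preempt and stop timeouts advertised above use the same "0 disables" convention, as their help texts state, though they drive wait/reset paths rather than a workqueue.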
+ +config DRM_I915_TIMESLICE_DURATION + int "Scheduling quantum for userspace batches (ms, jiffy granularity)" + default 1 # milliseconds + help + When two user batches of equal priority are executing, we will + alternate execution of each batch to ensure forward progress of + all users. This is necessary in some cases where there may be + an implicit dependency between those batches that requires + concurrent execution in order for them to proceed, e.g. they + interact with each other via userspace semaphores. Each context + is scheduled for execution for the timeslice duration, before + switching to the next context. + + May be 0 to disable timeslicing. diff --git a/drivers/gpu/drm/i915/Kconfig.unstable b/drivers/gpu/drm/i915/Kconfig.unstable new file mode 100644 index 000000000000..0c2276155c2b --- /dev/null +++ b/drivers/gpu/drm/i915/Kconfig.unstable @@ -0,0 +1,29 @@ +# SPDX-License-Identifier: GPL-2.0-only +config DRM_I915_UNSTABLE + bool "Enable unstable API for early prototype development" + depends on EXPERT + depends on STAGING + depends on BROKEN # should never be enabled by distros! + # We use the dependency on !COMPILE_TEST to not be enabled in + # allmodconfig or allyesconfig configurations + depends on !COMPILE_TEST + default n + help + Enable prototype uAPI under general discussion before they are + finalized. Such prototypes may be withdrawn or substantially + changed before release. They are only enabled here so that a wide + number of interested parties (userspace driver developers) can + verify that the uAPI meet their expectations. These uAPI should + never be used in production. + + Recommended for driver developers _only_. + + If in the slightest bit of doubt, say "N". + +config DRM_I915_UNSTABLE_FAKE_LMEM + bool "Enable the experimental fake lmem" + depends on DRM_I915_UNSTABLE + default n + help + Convert some system memory into a fake local memory region for + testing. 
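DRM_I915_TIMESLICE_DURATION describes round-robin scheduling between contexts of equal priority: each runnable context gets the configured quantum before execution switches to the next, so batches with implicit mutual dependencies keep making forward progress. The toy sketch below only illustrates that rotation idea with a plain kernel list; the actual decision is made in the execlists submission code, and all names here are invented.

	#include <linux/list.h>
	#include <linux/jiffies.h>

	struct demo_context {
		struct list_head link;
	};

	struct demo_runlist {
		struct list_head contexts;	/* first entry is the running context */
		unsigned long quantum;		/* in jiffies, 0 == timeslicing off */
		unsigned long deadline;		/* end of the current slice */
	};

	static void demo_timeslice_tick(struct demo_runlist *rl)
	{
		if (!rl->quantum || list_empty(&rl->contexts))
			return;

		if (time_after(jiffies, rl->deadline)) {
			/* rotate: the running context moves to the tail */
			list_rotate_left(&rl->contexts);
			rl->deadline = jiffies + rl->quantum;
		}
	}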
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 2587ea834f06..90dcf09f52cc 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -46,10 +46,12 @@ i915-y += i915_drv.o \ i915_pci.o \ i915_scatterlist.o \ i915_suspend.o \ + i915_switcheroo.o \ i915_sysfs.o \ i915_utils.o \ intel_csr.o \ intel_device_info.o \ + intel_memory_region.o \ intel_pch.o \ intel_pm.o \ intel_runtime_pm.o \ @@ -76,19 +78,24 @@ gt-y += \ gt/intel_breadcrumbs.o \ gt/intel_context.o \ gt/intel_engine_cs.o \ - gt/intel_engine_pool.o \ + gt/intel_engine_heartbeat.o \ gt/intel_engine_pm.o \ + gt/intel_engine_pool.o \ gt/intel_engine_user.o \ gt/intel_gt.o \ gt/intel_gt_irq.o \ gt/intel_gt_pm.o \ gt/intel_gt_pm_irq.o \ - gt/intel_hangcheck.o \ + gt/intel_gt_requests.o \ + gt/intel_llc.o \ gt/intel_lrc.o \ + gt/intel_mocs.o \ + gt/intel_rc6.o \ gt/intel_renderstate.o \ gt/intel_reset.o \ - gt/intel_ringbuffer.o \ - gt/intel_mocs.o \ + gt/intel_ring.o \ + gt/intel_ring_submission.o \ + gt/intel_rps.o \ gt/intel_sseu.o \ gt/intel_timeline.o \ gt/intel_workarounds.o @@ -114,10 +121,12 @@ gem-y += \ gem/i915_gem_internal.o \ gem/i915_gem_object.o \ gem/i915_gem_object_blt.o \ + gem/i915_gem_lmem.o \ gem/i915_gem_mman.o \ gem/i915_gem_pages.o \ gem/i915_gem_phys.o \ gem/i915_gem_pm.o \ + gem/i915_gem_region.o \ gem/i915_gem_shmem.o \ gem/i915_gem_shrinker.o \ gem/i915_gem_stolen.o \ @@ -141,6 +150,7 @@ i915-y += \ i915_scheduler.o \ i915_trace_points.o \ i915_vma.o \ + intel_region_lmem.o \ intel_wopcm.o # general-purpose microcontroller (GuC) support @@ -172,6 +182,7 @@ i915-y += \ display/intel_display_power.o \ display/intel_dpio_phy.o \ display/intel_dpll_mgr.o \ + display/intel_dsb.o \ display/intel_fbc.o \ display/intel_fifo_underrun.o \ display/intel_frontbuffer.o \ @@ -182,7 +193,8 @@ i915-y += \ display/intel_psr.o \ display/intel_quirks.o \ display/intel_sprite.o \ - display/intel_tc.o + display/intel_tc.o \ + display/intel_vga.o i915-$(CONFIG_ACPI) += \ display/intel_acpi.o \ display/intel_opregion.o @@ -235,7 +247,8 @@ i915-y += \ oa/i915_oa_cflgt2.o \ oa/i915_oa_cflgt3.o \ oa/i915_oa_cnl.o \ - oa/i915_oa_icl.o + oa/i915_oa_icl.o \ + oa/i915_oa_tgl.o i915-y += i915_perf.o # Post-mortem debug and GPU hang state capture diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 6e398c33a524..325df29b0447 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1584,7 +1584,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv) encoder->get_hw_state = gen11_dsi_get_hw_state; encoder->type = INTEL_OUTPUT_DSI; encoder->cloneable = 0; - encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); + encoder->pipe_mask = ~0; encoder->power_domain = POWER_DOMAIN_PORT_DSI; encoder->get_power_domains = gen11_dsi_get_power_domains; diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index 7cb2257bbb93..c2875b10adf9 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -199,7 +199,6 @@ intel_crtc_duplicate_state(struct drm_crtc *crtc) crtc_state->disable_cxsr = false; crtc_state->update_wm_pre = false; crtc_state->update_wm_post = false; - crtc_state->fb_changed = false; crtc_state->fifo_changed = false; crtc_state->preload_luts = false; crtc_state->wm.need_postvbl_update = false; @@ -265,10 +264,13 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state 
*scaler_sta */ mode = PS_SCALER_MODE_NORMAL; } else { + struct intel_plane *linked = + plane_state->planar_linked_plane; + mode = PS_SCALER_MODE_PLANAR; - if (plane_state->linked_plane) - mode |= PS_PLANE_Y_SEL(plane_state->linked_plane->id); + if (linked) + mode |= PS_PLANE_Y_SEL(linked->id); } } else if (INTEL_GEN(dev_priv) > 9 || IS_GEMINILAKE(dev_priv)) { mode = PS_SCALER_MODE_NORMAL; @@ -372,6 +374,15 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, */ if (!plane) { struct drm_plane_state *state; + + /* + * GLK+ scalers don't have a HQ mode so it + * isn't necessary to change between HQ and dyn mode + * on those platforms. + */ + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + continue; + plane = drm_plane_from_index(&dev_priv->drm, i); state = drm_atomic_get_plane_state(drm_state, plane); if (IS_ERR(state)) { @@ -379,13 +390,6 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, plane->base.id); return PTR_ERR(state); } - - /* - * the plane is added after plane checks are run, - * but since this plane is unchanged just do the - * minimum required validation. - */ - crtc_state->base.planes_changed = true; } intel_plane = to_intel_plane(plane); @@ -426,6 +430,13 @@ void intel_atomic_state_clear(struct drm_atomic_state *s) struct intel_atomic_state *state = to_intel_atomic_state(s); drm_atomic_state_default_clear(&state->base); state->dpll_set = state->modeset = false; + state->global_state_changed = false; + state->active_pipes = 0; + memset(&state->min_cdclk, 0, sizeof(state->min_cdclk)); + memset(&state->min_voltage_level, 0, sizeof(state->min_voltage_level)); + memset(&state->cdclk.logical, 0, sizeof(state->cdclk.logical)); + memset(&state->cdclk.actual, 0, sizeof(state->cdclk.actual)); + state->cdclk.pipe = INVALID_PIPE; } struct intel_crtc_state * @@ -439,3 +450,40 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state, return to_intel_crtc_state(crtc_state); } + +int intel_atomic_lock_global_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + + state->global_state_changed = true; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + int ret; + + ret = drm_modeset_lock(&crtc->base.mutex, + state->base.acquire_ctx); + if (ret) + return ret; + } + + return 0; +} + +int intel_atomic_serialize_global_state(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc *crtc; + + state->global_state_changed = true; + + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state; + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } + + return 0; +} diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h index 58065d3161a3..49d5cb1b9e0a 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.h +++ b/drivers/gpu/drm/i915/display/intel_atomic.h @@ -16,6 +16,7 @@ struct drm_crtc_state; struct drm_device; struct drm_i915_private; struct drm_property; +struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; @@ -46,4 +47,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv, struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state); +int intel_atomic_lock_global_state(struct intel_atomic_state *state); + +int intel_atomic_serialize_global_state(struct intel_atomic_state *state); + #endif /* __INTEL_ATOMIC_H__ */ diff --git 
a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index d1fcdf206da4..98f557a9f8ee 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -138,18 +138,58 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state, return cpp * crtc_state->pixel_rate; } +bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state, + struct intel_plane *plane) +{ + struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct intel_plane_state *plane_state = + intel_atomic_get_new_plane_state(state, plane); + struct intel_crtc *crtc = to_intel_crtc(plane_state->base.crtc); + struct intel_crtc_state *crtc_state; + + if (!plane_state->base.visible || !plane->min_cdclk) + return false; + + crtc_state = intel_atomic_get_new_crtc_state(state, crtc); + + crtc_state->min_cdclk[plane->id] = + plane->min_cdclk(crtc_state, plane_state); + + /* + * Does the cdclk need to be bumbed up? + * + * Note: we obviously need to be called before the new + * cdclk frequency is calculated so state->cdclk.logical + * hasn't been populated yet. Hence we look at the old + * cdclk state under dev_priv->cdclk.logical. This is + * safe as long we hold at least one crtc mutex (which + * must be true since we have crtc_state). + */ + if (crtc_state->min_cdclk[plane->id] > dev_priv->cdclk.logical.cdclk) { + DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk (%d kHz) > logical cdclk (%d kHz)\n", + plane->base.base.id, plane->base.name, + crtc_state->min_cdclk[plane->id], + dev_priv->cdclk.logical.cdclk); + return true; + } + + return false; +} + int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, struct intel_plane_state *new_plane_state) { struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane); + const struct drm_framebuffer *fb = new_plane_state->base.fb; int ret; new_crtc_state->active_planes &= ~BIT(plane->id); new_crtc_state->nv12_planes &= ~BIT(plane->id); new_crtc_state->c8_planes &= ~BIT(plane->id); new_crtc_state->data_rate[plane->id] = 0; + new_crtc_state->min_cdclk[plane->id] = 0; new_plane_state->base.visible = false; if (!new_plane_state->base.crtc && !old_plane_state->base.crtc) @@ -164,11 +204,11 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ new_crtc_state->active_planes |= BIT(plane->id); if (new_plane_state->base.visible && - is_planar_yuv_format(new_plane_state->base.fb->format->format)) + drm_format_info_is_yuv_semiplanar(fb->format)) new_crtc_state->nv12_planes |= BIT(plane->id); if (new_plane_state->base.visible && - new_plane_state->base.fb->format->format == DRM_FORMAT_C8) + fb->format->format == DRM_FORMAT_C8) new_crtc_state->c8_planes |= BIT(plane->id); if (new_plane_state->base.visible || old_plane_state->base.visible) @@ -194,14 +234,11 @@ get_crtc_from_states(const struct intel_plane_state *old_plane_state, return NULL; } -static int intel_plane_atomic_check(struct drm_plane *_plane, - struct drm_plane_state *_new_plane_state) +int intel_plane_atomic_check(struct intel_atomic_state *state, + struct intel_plane *plane) { - struct intel_plane *plane = to_intel_plane(_plane); - struct intel_atomic_state *state = - to_intel_atomic_state(_new_plane_state->state); struct intel_plane_state *new_plane_state = - to_intel_plane_state(_new_plane_state); + intel_atomic_get_new_plane_state(state, 
plane); const struct intel_plane_state *old_plane_state = intel_atomic_get_old_plane_state(state, plane); struct intel_crtc *crtc = @@ -320,9 +357,9 @@ void skl_update_planes_on_crtc(struct intel_atomic_state *state, if (new_plane_state->base.visible) { intel_update_plane(plane, new_crtc_state, new_plane_state); - } else if (new_plane_state->slave) { + } else if (new_plane_state->planar_slave) { struct intel_plane *master = - new_plane_state->linked_plane; + new_plane_state->planar_linked_plane; /* * We update the slave plane from this function because @@ -368,5 +405,4 @@ void i9xx_update_planes_on_crtc(struct intel_atomic_state *state, const struct drm_plane_helper_funcs intel_plane_helper_funcs = { .prepare_fb = intel_prepare_plane_fb, .cleanup_fb = intel_cleanup_plane_fb, - .atomic_check = intel_plane_atomic_check, }; diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index cb7ef4f9eafd..e61e9a82aadf 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -41,9 +41,13 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_ struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, struct intel_plane_state *intel_state); +int intel_plane_atomic_check(struct intel_atomic_state *state, + struct intel_plane *plane); int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *crtc_state, const struct intel_plane_state *old_plane_state, struct intel_plane_state *plane_state); +bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state, + struct intel_plane *plane); #endif /* __INTEL_ATOMIC_PLANE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c index ddcccf4408c3..85e6b2bbb34f 100644 --- a/drivers/gpu/drm/i915/display/intel_audio.c +++ b/drivers/gpu/drm/i915/display/intel_audio.c @@ -28,6 +28,7 @@ #include <drm/i915_component.h> #include "i915_drv.h" +#include "intel_atomic.h" #include "intel_audio.h" #include "intel_display_types.h" #include "intel_lpe_audio.h" @@ -560,8 +561,9 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder, u32 tmp, eldv; i915_reg_t aud_config, aud_cntrl_st2; - DRM_DEBUG_KMS("Disable audio codec on port %c, pipe %c\n", - port_name(port), pipe_name(pipe)); + DRM_DEBUG_KMS("Disable audio codec on [ENCODER:%d:%s], pipe %c\n", + encoder->base.base.id, encoder->base.name, + pipe_name(pipe)); if (WARN_ON(port == PORT_A)) return; @@ -609,8 +611,9 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder, int len, i; i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2; - DRM_DEBUG_KMS("Enable audio codec on port %c, pipe %c, %u bytes ELD\n", - port_name(port), pipe_name(pipe), drm_eld_size(eld)); + DRM_DEBUG_KMS("Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n", + encoder->base.base.id, encoder->base.name, + pipe_name(pipe), drm_eld_size(eld)); if (WARN_ON(port == PORT_A)) return; @@ -816,13 +819,8 @@ retry: to_intel_atomic_state(state)->cdclk.force_min_cdclk = enable ? 2 * 96000 : 0; - /* - * Protects dev_priv->cdclk.force_min_cdclk - * Need to lock this here in case we have no active pipes - * and thus wouldn't lock it during the commit otherwise. 
- */ - ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, - &ctx); + /* Protects dev_priv->cdclk.force_min_cdclk */ + ret = intel_atomic_lock_global_state(to_intel_atomic_state(state)); if (!ret) ret = drm_atomic_commit(state); @@ -850,11 +848,23 @@ static unsigned long i915_audio_component_get_power(struct device *kdev) ret = intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO); - /* Force CDCLK to 2*BCLK as long as we need audio to be powered. */ - if (dev_priv->audio_power_refcount++ == 0) - if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + if (dev_priv->audio_power_refcount++ == 0) { + if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) { + I915_WRITE(AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl); + DRM_DEBUG_KMS("restored AUD_FREQ_CNTRL to 0x%x\n", + dev_priv->audio_freq_cntrl); + } + + /* Force CDCLK to 2*BCLK as long as we need audio powered. */ + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, true); + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + I915_WRITE(AUD_PIN_BUF_CTL, + (I915_READ(AUD_PIN_BUF_CTL) | + AUD_PIN_BUF_ENABLE)); + } + return ret; } @@ -865,7 +875,7 @@ static void i915_audio_component_put_power(struct device *kdev, /* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */ if (--dev_priv->audio_power_refcount == 0) - if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) glk_force_audio_cdclk(dev_priv, false); intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO, cookie); @@ -1114,6 +1124,12 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv) return; } + if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) { + dev_priv->audio_freq_cntrl = I915_READ(AUD_FREQ_CNTRL); + DRM_DEBUG_KMS("init value of AUD_FREQ_CNTRL of 0x%x\n", + dev_priv->audio_freq_cntrl); + } + dev_priv->audio_component_registered = true; } diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 3250c1b8dcca..63c1bd4c2954 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -1399,6 +1399,7 @@ static enum port dvo_port_to_port(u8 dvo_port) [PORT_D] = { DVO_PORT_HDMID, DVO_PORT_DPD, -1}, [PORT_E] = { DVO_PORT_CRT, DVO_PORT_HDMIE, DVO_PORT_DPE}, [PORT_F] = { DVO_PORT_HDMIF, DVO_PORT_DPF, -1}, + [PORT_G] = { DVO_PORT_HDMIG, DVO_PORT_DPG, -1}, }; enum port port; int i; @@ -1625,7 +1626,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv, expected_size = 37; } else if (bdb->version <= 215) { expected_size = 38; - } else if (bdb->version <= 216) { + } else if (bdb->version <= 229) { expected_size = 39; } else { expected_size = sizeof(*child); @@ -1843,7 +1844,7 @@ void intel_bios_init(struct drm_i915_private *dev_priv) const struct bdb_header *bdb; u8 __iomem *bios = NULL; - if (!HAS_DISPLAY(dev_priv)) { + if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) { DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n"); return; } @@ -2258,6 +2259,9 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, case DP_AUX_F: aux_ch = AUX_CH_F; break; + case DP_AUX_G: + aux_ch = AUX_CH_G; + break; default: MISSING_CASE(info->alternate_aux_channel); aux_ch = AUX_CH_A; diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 4969189e620f..98f064828a57 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ 
-1,5 +1,5 @@ /* - * Copyright © 2016 Intel Corporation + * Copyright © 2016-2019 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -35,6 +35,7 @@ #include <drm/i915_drm.h> struct drm_i915_private; +enum port; enum intel_backlight_type { INTEL_BACKLIGHT_PMIC, diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index 688858ebe4d0..22e83f857de8 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -35,28 +35,54 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv, if (ret) return ret; - switch (val & 0xf) { - case 0: - qi->dram_type = INTEL_DRAM_DDR4; - break; - case 1: - qi->dram_type = INTEL_DRAM_DDR3; - break; - case 2: - qi->dram_type = INTEL_DRAM_LPDDR3; - break; - case 3: - qi->dram_type = INTEL_DRAM_LPDDR3; - break; - default: - MISSING_CASE(val & 0xf); - break; + if (IS_GEN(dev_priv, 12)) { + switch (val & 0xf) { + case 0: + qi->dram_type = INTEL_DRAM_DDR4; + break; + case 3: + qi->dram_type = INTEL_DRAM_LPDDR4; + break; + case 4: + qi->dram_type = INTEL_DRAM_DDR3; + break; + case 5: + qi->dram_type = INTEL_DRAM_LPDDR3; + break; + default: + MISSING_CASE(val & 0xf); + break; + } + } else if (IS_GEN(dev_priv, 11)) { + switch (val & 0xf) { + case 0: + qi->dram_type = INTEL_DRAM_DDR4; + break; + case 1: + qi->dram_type = INTEL_DRAM_DDR3; + break; + case 2: + qi->dram_type = INTEL_DRAM_LPDDR3; + break; + case 3: + qi->dram_type = INTEL_DRAM_LPDDR4; + break; + default: + MISSING_CASE(val & 0xf); + break; + } + } else { + MISSING_CASE(INTEL_GEN(dev_priv)); + qi->dram_type = INTEL_DRAM_LPDDR3; /* Conservative default */ } qi->num_channels = (val & 0xf0) >> 4; qi->num_points = (val & 0xf00) >> 8; - qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8; + if (IS_GEN(dev_priv, 12)) + qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 16; + else if (IS_GEN(dev_priv, 11)) + qi->t_bl = qi->dram_type == INTEL_DRAM_DDR4 ? 4 : 8; return 0; } @@ -132,20 +158,25 @@ static int icl_sagv_max_dclk(const struct intel_qgv_info *qi) } struct intel_sa_info { - u8 deburst, mpagesize, deprogbwlimit, displayrtids; + u16 displayrtids; + u8 deburst, deprogbwlimit; }; static const struct intel_sa_info icl_sa_info = { .deburst = 8, - .mpagesize = 16, .deprogbwlimit = 25, /* GB/s */ .displayrtids = 128, }; -static int icl_get_bw_info(struct drm_i915_private *dev_priv) +static const struct intel_sa_info tgl_sa_info = { + .deburst = 16, + .deprogbwlimit = 34, /* GB/s */ + .displayrtids = 256, +}; + +static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel_sa_info *sa) { struct intel_qgv_info qi = {}; - const struct intel_sa_info *sa = &icl_sa_info; bool is_y_tile = true; /* assume y tile may be used */ int num_channels; int deinterleave; @@ -233,14 +264,16 @@ static unsigned int icl_max_bw(struct drm_i915_private *dev_priv, void intel_bw_init_hw(struct drm_i915_private *dev_priv) { - if (IS_GEN(dev_priv, 11)) - icl_get_bw_info(dev_priv); + if (IS_GEN(dev_priv, 12)) + icl_get_bw_info(dev_priv, &tgl_sa_info); + else if (IS_GEN(dev_priv, 11)) + icl_get_bw_info(dev_priv, &icl_sa_info); } static unsigned int intel_max_data_rate(struct drm_i915_private *dev_priv, int num_planes) { - if (IS_GEN(dev_priv, 11)) + if (INTEL_GEN(dev_priv) >= 11) /* * FIXME with SAGV disabled maybe we can assume * point 1 will always be used? 
Seems to match diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index d0bc42e5039c..0caef2592a7e 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -21,6 +21,7 @@ * DEALINGS IN THE SOFTWARE. */ +#include "intel_atomic.h" #include "intel_cdclk.h" #include "intel_display_types.h" #include "intel_sideband.h" @@ -1161,28 +1162,88 @@ static void skl_uninit_cdclk(struct drm_i915_private *dev_priv) skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } -static int bxt_calc_cdclk(int min_cdclk) -{ - if (min_cdclk > 576000) - return 624000; - else if (min_cdclk > 384000) - return 576000; - else if (min_cdclk > 288000) - return 384000; - else if (min_cdclk > 144000) - return 288000; - else - return 144000; +static const struct intel_cdclk_vals bxt_cdclk_table[] = { + { .refclk = 19200, .cdclk = 144000, .divider = 8, .ratio = 60 }, + { .refclk = 19200, .cdclk = 288000, .divider = 4, .ratio = 60 }, + { .refclk = 19200, .cdclk = 384000, .divider = 3, .ratio = 60 }, + { .refclk = 19200, .cdclk = 576000, .divider = 2, .ratio = 60 }, + { .refclk = 19200, .cdclk = 624000, .divider = 2, .ratio = 65 }, + {} +}; + +static const struct intel_cdclk_vals glk_cdclk_table[] = { + { .refclk = 19200, .cdclk = 79200, .divider = 8, .ratio = 33 }, + { .refclk = 19200, .cdclk = 158400, .divider = 4, .ratio = 33 }, + { .refclk = 19200, .cdclk = 316800, .divider = 2, .ratio = 33 }, + {} +}; + +static const struct intel_cdclk_vals cnl_cdclk_table[] = { + { .refclk = 19200, .cdclk = 168000, .divider = 4, .ratio = 35 }, + { .refclk = 19200, .cdclk = 336000, .divider = 2, .ratio = 35 }, + { .refclk = 19200, .cdclk = 528000, .divider = 2, .ratio = 55 }, + + { .refclk = 24000, .cdclk = 168000, .divider = 4, .ratio = 28 }, + { .refclk = 24000, .cdclk = 336000, .divider = 2, .ratio = 28 }, + { .refclk = 24000, .cdclk = 528000, .divider = 2, .ratio = 44 }, + {} +}; + +static const struct intel_cdclk_vals icl_cdclk_table[] = { + { .refclk = 19200, .cdclk = 172800, .divider = 2, .ratio = 18 }, + { .refclk = 19200, .cdclk = 192000, .divider = 2, .ratio = 20 }, + { .refclk = 19200, .cdclk = 307200, .divider = 2, .ratio = 32 }, + { .refclk = 19200, .cdclk = 326400, .divider = 4, .ratio = 68 }, + { .refclk = 19200, .cdclk = 556800, .divider = 2, .ratio = 58 }, + { .refclk = 19200, .cdclk = 652800, .divider = 2, .ratio = 68 }, + + { .refclk = 24000, .cdclk = 180000, .divider = 2, .ratio = 15 }, + { .refclk = 24000, .cdclk = 192000, .divider = 2, .ratio = 16 }, + { .refclk = 24000, .cdclk = 312000, .divider = 2, .ratio = 26 }, + { .refclk = 24000, .cdclk = 324000, .divider = 4, .ratio = 54 }, + { .refclk = 24000, .cdclk = 552000, .divider = 2, .ratio = 46 }, + { .refclk = 24000, .cdclk = 648000, .divider = 2, .ratio = 54 }, + + { .refclk = 38400, .cdclk = 172800, .divider = 2, .ratio = 9 }, + { .refclk = 38400, .cdclk = 192000, .divider = 2, .ratio = 10 }, + { .refclk = 38400, .cdclk = 307200, .divider = 2, .ratio = 16 }, + { .refclk = 38400, .cdclk = 326400, .divider = 4, .ratio = 34 }, + { .refclk = 38400, .cdclk = 556800, .divider = 2, .ratio = 29 }, + { .refclk = 38400, .cdclk = 652800, .divider = 2, .ratio = 34 }, + {} +}; + +static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk) +{ + const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + int i; + + for (i = 0; table[i].refclk; i++) + if (table[i].refclk == dev_priv->cdclk.hw.ref && + table[i].cdclk >= min_cdclk) + return table[i].cdclk; + + 
WARN(1, "Cannot satisfy minimum cdclk %d with refclk %u\n", + min_cdclk, dev_priv->cdclk.hw.ref); + return 0; } -static int glk_calc_cdclk(int min_cdclk) +static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) { - if (min_cdclk > 158400) - return 316800; - else if (min_cdclk > 79200) - return 158400; - else - return 79200; + const struct intel_cdclk_vals *table = dev_priv->cdclk.table; + int i; + + if (cdclk == dev_priv->cdclk.hw.bypass) + return 0; + + for (i = 0; table[i].refclk; i++) + if (table[i].refclk == dev_priv->cdclk.hw.ref && + table[i].cdclk == cdclk) + return dev_priv->cdclk.hw.ref * table[i].ratio; + + WARN(1, "cdclk %d not valid for refclk %u\n", + cdclk, dev_priv->cdclk.hw.ref); + return 0; } static u8 bxt_calc_voltage_level(int cdclk) @@ -1190,69 +1251,99 @@ static u8 bxt_calc_voltage_level(int cdclk) return DIV_ROUND_UP(cdclk, 25000); } -static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) +static u8 cnl_calc_voltage_level(int cdclk) { - int ratio; - - if (cdclk == dev_priv->cdclk.hw.bypass) + if (cdclk > 336000) + return 2; + else if (cdclk > 168000) + return 1; + else return 0; +} - switch (cdclk) { - default: - MISSING_CASE(cdclk); - /* fall through */ - case 144000: - case 288000: - case 384000: - case 576000: - ratio = 60; - break; - case 624000: - ratio = 65; - break; - } +static u8 icl_calc_voltage_level(int cdclk) +{ + if (cdclk > 556800) + return 2; + else if (cdclk > 312000) + return 1; + else + return 0; +} - return dev_priv->cdclk.hw.ref * ratio; +static u8 ehl_calc_voltage_level(int cdclk) +{ + if (cdclk > 312000) + return 2; + else if (cdclk > 180000) + return 1; + else + return 0; } -static int glk_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk) +static void cnl_readout_refclk(struct drm_i915_private *dev_priv, + struct intel_cdclk_state *cdclk_state) { - int ratio; + if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz) + cdclk_state->ref = 24000; + else + cdclk_state->ref = 19200; +} - if (cdclk == dev_priv->cdclk.hw.bypass) - return 0; +static void icl_readout_refclk(struct drm_i915_private *dev_priv, + struct intel_cdclk_state *cdclk_state) +{ + u32 dssm = I915_READ(SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK; - switch (cdclk) { + switch (dssm) { default: - MISSING_CASE(cdclk); + MISSING_CASE(dssm); /* fall through */ - case 79200: - case 158400: - case 316800: - ratio = 33; + case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: + cdclk_state->ref = 24000; + break; + case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz: + cdclk_state->ref = 19200; + break; + case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz: + cdclk_state->ref = 38400; break; } - - return dev_priv->cdclk.hw.ref * ratio; } -static void bxt_de_pll_update(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) +static void bxt_de_pll_readout(struct drm_i915_private *dev_priv, + struct intel_cdclk_state *cdclk_state) { - u32 val; + u32 val, ratio; - cdclk_state->ref = 19200; - cdclk_state->vco = 0; + if (INTEL_GEN(dev_priv) >= 11) + icl_readout_refclk(dev_priv, cdclk_state); + else if (IS_CANNONLAKE(dev_priv)) + cnl_readout_refclk(dev_priv, cdclk_state); + else + cdclk_state->ref = 19200; val = I915_READ(BXT_DE_PLL_ENABLE); - if ((val & BXT_DE_PLL_PLL_ENABLE) == 0) + if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 || + (val & BXT_DE_PLL_LOCK) == 0) { + /* + * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but + * setting it to zero is a way to signal that. 
+ */ + cdclk_state->vco = 0; return; + } - if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0)) - return; + /* + * CNL+ have the ratio directly in the PLL enable register, gen9lp had + * it in a separate PLL control register. + */ + if (INTEL_GEN(dev_priv) >= 10) + ratio = val & CNL_CDCLK_PLL_RATIO_MASK; + else + ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK; - val = I915_READ(BXT_DE_PLL_CTL); - cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref; + cdclk_state->vco = ratio * cdclk_state->ref; } static void bxt_get_cdclk(struct drm_i915_private *dev_priv, @@ -1261,12 +1352,19 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, u32 divider; int div; - bxt_de_pll_update(dev_priv, cdclk_state); + bxt_de_pll_readout(dev_priv, cdclk_state); - cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref; + if (INTEL_GEN(dev_priv) >= 12) + cdclk_state->bypass = cdclk_state->ref / 2; + else if (INTEL_GEN(dev_priv) >= 11) + cdclk_state->bypass = 50000; + else + cdclk_state->bypass = cdclk_state->ref; - if (cdclk_state->vco == 0) + if (cdclk_state->vco == 0) { + cdclk_state->cdclk = cdclk_state->bypass; goto out; + } divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; @@ -1275,13 +1373,15 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, div = 2; break; case BXT_CDCLK_CD2X_DIV_SEL_1_5: - WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n"); + WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10, + "Unsupported divider\n"); div = 3; break; case BXT_CDCLK_CD2X_DIV_SEL_2: div = 4; break; case BXT_CDCLK_CD2X_DIV_SEL_4: + WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n"); div = 8; break; default: @@ -1297,7 +1397,7 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv, * at least what the CDCLK frequency requires. */ cdclk_state->voltage_level = - bxt_calc_voltage_level(cdclk_state->cdclk); + dev_priv->display.calc_voltage_level(cdclk_state->cdclk); } static void bxt_de_pll_disable(struct drm_i915_private *dev_priv) @@ -1332,259 +1432,6 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco) dev_priv->cdclk.hw.vco = vco; } -static void bxt_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state, - enum pipe pipe) -{ - int cdclk = cdclk_state->cdclk; - int vco = cdclk_state->vco; - u32 val, divider; - int ret; - - /* cdclk = vco / 2 / div{1,1.5,2,4} */ - switch (DIV_ROUND_CLOSEST(vco, cdclk)) { - default: - WARN_ON(cdclk != dev_priv->cdclk.hw.bypass); - WARN_ON(vco != 0); - /* fall through */ - case 2: - divider = BXT_CDCLK_CD2X_DIV_SEL_1; - break; - case 3: - WARN(IS_GEMINILAKE(dev_priv), "Unsupported divider\n"); - divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; - break; - case 4: - divider = BXT_CDCLK_CD2X_DIV_SEL_2; - break; - case 8: - divider = BXT_CDCLK_CD2X_DIV_SEL_4; - break; - } - - /* - * Inform power controller of upcoming frequency change. BSpec - * requires us to wait up to 150usec, but that leads to timeouts; - * the 2ms used here is based on experiment. 
- */ - ret = sandybridge_pcode_write_timeout(dev_priv, - HSW_PCODE_DE_WRITE_FREQ_REQ, - 0x80000000, 150, 2); - if (ret) { - DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n", - ret, cdclk); - return; - } - - if (dev_priv->cdclk.hw.vco != 0 && - dev_priv->cdclk.hw.vco != vco) - bxt_de_pll_disable(dev_priv); - - if (dev_priv->cdclk.hw.vco != vco) - bxt_de_pll_enable(dev_priv, vco); - - val = divider | skl_cdclk_decimal(cdclk); - if (pipe == INVALID_PIPE) - val |= BXT_CDCLK_CD2X_PIPE_NONE; - else - val |= BXT_CDCLK_CD2X_PIPE(pipe); - /* - * Disable SSA Precharge when CD clock frequency < 500 MHz, - * enable otherwise. - */ - if (cdclk >= 500000) - val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; - I915_WRITE(CDCLK_CTL, val); - - if (pipe != INVALID_PIPE) - intel_wait_for_vblank(dev_priv, pipe); - - /* - * The timeout isn't specified, the 2ms used here is based on - * experiment. - * FIXME: Waiting for the request completion could be delayed until - * the next PCODE request based on BSpec. - */ - ret = sandybridge_pcode_write_timeout(dev_priv, - HSW_PCODE_DE_WRITE_FREQ_REQ, - cdclk_state->voltage_level, 150, 2); - if (ret) { - DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", - ret, cdclk); - return; - } - - intel_update_cdclk(dev_priv); -} - -static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) -{ - u32 cdctl, expected; - - intel_update_cdclk(dev_priv); - intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); - - if (dev_priv->cdclk.hw.vco == 0 || - dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass) - goto sanitize; - - /* DPLL okay; verify the cdclock - * - * Some BIOS versions leave an incorrect decimal frequency value and - * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4, - * so sanitize this register. - */ - cdctl = I915_READ(CDCLK_CTL); - /* - * Let's ignore the pipe field, since BIOS could have configured the - * dividers both synching to an active pipe, or asynchronously - * (PIPE_NONE). - */ - cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE; - - expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) | - skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk); - /* - * Disable SSA Precharge when CD clock frequency < 500 MHz, - * enable otherwise. - */ - if (dev_priv->cdclk.hw.cdclk >= 500000) - expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; - - if (cdctl == expected) - /* All well; nothing to sanitize */ - return; - -sanitize: - DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); - - /* force cdclk programming */ - dev_priv->cdclk.hw.cdclk = 0; - - /* force full PLL disable + enable */ - dev_priv->cdclk.hw.vco = -1; -} - -static void bxt_init_cdclk(struct drm_i915_private *dev_priv) -{ - struct intel_cdclk_state cdclk_state; - - bxt_sanitize_cdclk(dev_priv); - - if (dev_priv->cdclk.hw.cdclk != 0 && - dev_priv->cdclk.hw.vco != 0) - return; - - cdclk_state = dev_priv->cdclk.hw; - - /* - * FIXME: - * - The initial CDCLK needs to be read from VBT. - * Need to make this change after VBT has changes for BXT. 
- */ - if (IS_GEMINILAKE(dev_priv)) { - cdclk_state.cdclk = glk_calc_cdclk(0); - cdclk_state.vco = glk_de_pll_vco(dev_priv, cdclk_state.cdclk); - } else { - cdclk_state.cdclk = bxt_calc_cdclk(0); - cdclk_state.vco = bxt_de_pll_vco(dev_priv, cdclk_state.cdclk); - } - cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk); - - bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); -} - -static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) -{ - struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw; - - cdclk_state.cdclk = cdclk_state.bypass; - cdclk_state.vco = 0; - cdclk_state.voltage_level = bxt_calc_voltage_level(cdclk_state.cdclk); - - bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); -} - -static int cnl_calc_cdclk(int min_cdclk) -{ - if (min_cdclk > 336000) - return 528000; - else if (min_cdclk > 168000) - return 336000; - else - return 168000; -} - -static u8 cnl_calc_voltage_level(int cdclk) -{ - if (cdclk > 336000) - return 2; - else if (cdclk > 168000) - return 1; - else - return 0; -} - -static void cnl_cdclk_pll_update(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) -{ - u32 val; - - if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz) - cdclk_state->ref = 24000; - else - cdclk_state->ref = 19200; - - cdclk_state->vco = 0; - - val = I915_READ(BXT_DE_PLL_ENABLE); - if ((val & BXT_DE_PLL_PLL_ENABLE) == 0) - return; - - if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0)) - return; - - cdclk_state->vco = (val & CNL_CDCLK_PLL_RATIO_MASK) * cdclk_state->ref; -} - -static void cnl_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) -{ - u32 divider; - int div; - - cnl_cdclk_pll_update(dev_priv, cdclk_state); - - cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref; - - if (cdclk_state->vco == 0) - goto out; - - divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK; - - switch (divider) { - case BXT_CDCLK_CD2X_DIV_SEL_1: - div = 2; - break; - case BXT_CDCLK_CD2X_DIV_SEL_2: - div = 4; - break; - default: - MISSING_CASE(divider); - return; - } - - cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div); - - out: - /* - * Can't read this out :( Let's assume it's - * at least what the CDCLK frequency requires. 
- */ - cdclk_state->voltage_level = - cnl_calc_voltage_level(cdclk_state->cdclk); -} - static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv) { u32 val; @@ -1618,7 +1465,27 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco) dev_priv->cdclk.hw.vco = vco; } -static void cnl_set_cdclk(struct drm_i915_private *dev_priv, +static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) +{ + if (INTEL_GEN(dev_priv) >= 12) { + if (pipe == INVALID_PIPE) + return TGL_CDCLK_CD2X_PIPE_NONE; + else + return TGL_CDCLK_CD2X_PIPE(pipe); + } else if (INTEL_GEN(dev_priv) >= 11) { + if (pipe == INVALID_PIPE) + return ICL_CDCLK_CD2X_PIPE_NONE; + else + return ICL_CDCLK_CD2X_PIPE(pipe); + } else { + if (pipe == INVALID_PIPE) + return BXT_CDCLK_CD2X_PIPE_NONE; + else + return BXT_CDCLK_CD2X_PIPE(pipe); + } +} + +static void bxt_set_cdclk(struct drm_i915_private *dev_priv, const struct intel_cdclk_state *cdclk_state, enum pipe pipe) { @@ -1627,17 +1494,28 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv, u32 val, divider; int ret; - ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, - SKL_CDCLK_PREPARE_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, 3); + /* Inform power controller of upcoming frequency change. */ + if (INTEL_GEN(dev_priv) >= 10) + ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, + SKL_CDCLK_PREPARE_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, + SKL_CDCLK_READY_FOR_CHANGE, 3); + else + /* + * BSpec requires us to wait up to 150usec, but that leads to + * timeouts; the 2ms used here is based on experiment. + */ + ret = sandybridge_pcode_write_timeout(dev_priv, + HSW_PCODE_DE_WRITE_FREQ_REQ, + 0x80000000, 150, 2); + if (ret) { - DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", - ret); + DRM_ERROR("Failed to inform PCU about cdclk change (err %d, freq %d)\n", + ret, cdclk); return; } - /* cdclk = vco / 2 / div{1,2} */ + /* cdclk = vco / 2 / div{1,1.5,2,4} */ switch (DIV_ROUND_CLOSEST(vco, cdclk)) { default: WARN_ON(cdclk != dev_priv->cdclk.hw.bypass); @@ -1646,67 +1524,87 @@ static void cnl_set_cdclk(struct drm_i915_private *dev_priv, case 2: divider = BXT_CDCLK_CD2X_DIV_SEL_1; break; + case 3: + WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10, + "Unsupported divider\n"); + divider = BXT_CDCLK_CD2X_DIV_SEL_1_5; + break; case 4: divider = BXT_CDCLK_CD2X_DIV_SEL_2; break; + case 8: + WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n"); + divider = BXT_CDCLK_CD2X_DIV_SEL_4; + break; } - if (dev_priv->cdclk.hw.vco != 0 && - dev_priv->cdclk.hw.vco != vco) - cnl_cdclk_pll_disable(dev_priv); - - if (dev_priv->cdclk.hw.vco != vco) - cnl_cdclk_pll_enable(dev_priv, vco); + if (INTEL_GEN(dev_priv) >= 10) { + if (dev_priv->cdclk.hw.vco != 0 && + dev_priv->cdclk.hw.vco != vco) + cnl_cdclk_pll_disable(dev_priv); - val = divider | skl_cdclk_decimal(cdclk); - if (pipe == INVALID_PIPE) - val |= BXT_CDCLK_CD2X_PIPE_NONE; - else - val |= BXT_CDCLK_CD2X_PIPE(pipe); - I915_WRITE(CDCLK_CTL, val); + if (dev_priv->cdclk.hw.vco != vco) + cnl_cdclk_pll_enable(dev_priv, vco); - if (pipe != INVALID_PIPE) - intel_wait_for_vblank(dev_priv, pipe); + } else { + if (dev_priv->cdclk.hw.vco != 0 && + dev_priv->cdclk.hw.vco != vco) + bxt_de_pll_disable(dev_priv); - /* inform PCU of the change */ - sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, - cdclk_state->voltage_level); + if (dev_priv->cdclk.hw.vco != vco) + bxt_de_pll_enable(dev_priv, vco); + } - intel_update_cdclk(dev_priv); + 
val = divider | skl_cdclk_decimal(cdclk) | + bxt_cdclk_cd2x_pipe(dev_priv, pipe); /* - * Can't read out the voltage level :( - * Let's just assume everything is as expected. + * Disable SSA Precharge when CD clock frequency < 500 MHz, + * enable otherwise. */ - dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level; -} + if (IS_GEN9_LP(dev_priv) && cdclk >= 500000) + val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; + I915_WRITE(CDCLK_CTL, val); -static int cnl_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) -{ - int ratio; + if (pipe != INVALID_PIPE) + intel_wait_for_vblank(dev_priv, pipe); - if (cdclk == dev_priv->cdclk.hw.bypass) - return 0; + if (INTEL_GEN(dev_priv) >= 10) { + ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, + cdclk_state->voltage_level); + } else { + /* + * The timeout isn't specified, the 2ms used here is based on + * experiment. + * FIXME: Waiting for the request completion could be delayed + * until the next PCODE request based on BSpec. + */ + ret = sandybridge_pcode_write_timeout(dev_priv, + HSW_PCODE_DE_WRITE_FREQ_REQ, + cdclk_state->voltage_level, + 150, 2); + } - switch (cdclk) { - default: - MISSING_CASE(cdclk); - /* fall through */ - case 168000: - case 336000: - ratio = dev_priv->cdclk.hw.ref == 19200 ? 35 : 28; - break; - case 528000: - ratio = dev_priv->cdclk.hw.ref == 19200 ? 55 : 44; - break; + if (ret) { + DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n", + ret, cdclk); + return; } - return dev_priv->cdclk.hw.ref * ratio; + intel_update_cdclk(dev_priv); + + if (INTEL_GEN(dev_priv) >= 10) + /* + * Can't read out the voltage level :( + * Let's just assume everything is as expected. + */ + dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level; } -static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv) +static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) { u32 cdctl, expected; + int cdclk, vco; intel_update_cdclk(dev_priv); intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); @@ -1727,262 +1625,65 @@ static void cnl_sanitize_cdclk(struct drm_i915_private *dev_priv) * dividers both synching to an active pipe, or asynchronously * (PIPE_NONE). 
*/ - cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE; - - expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) | - skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk); - - if (cdctl == expected) - /* All well; nothing to sanitize */ - return; - -sanitize: - DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); + cdctl &= ~bxt_cdclk_cd2x_pipe(dev_priv, INVALID_PIPE); - /* force cdclk programming */ - dev_priv->cdclk.hw.cdclk = 0; + /* Make sure this is a legal cdclk value for the platform */ + cdclk = bxt_calc_cdclk(dev_priv, dev_priv->cdclk.hw.cdclk); + if (cdclk != dev_priv->cdclk.hw.cdclk) + goto sanitize; - /* force full PLL disable + enable */ - dev_priv->cdclk.hw.vco = -1; -} + /* Make sure the VCO is correct for the cdclk */ + vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); + if (vco != dev_priv->cdclk.hw.vco) + goto sanitize; -static int icl_calc_cdclk(int min_cdclk, unsigned int ref) -{ - static const int ranges_24[] = { 180000, 192000, 312000, 552000, 648000 }; - static const int ranges_19_38[] = { 172800, 192000, 307200, 556800, 652800 }; - const int *ranges; - int len, i; + expected = skl_cdclk_decimal(cdclk); - switch (ref) { - default: - MISSING_CASE(ref); - /* fall through */ - case 24000: - ranges = ranges_24; - len = ARRAY_SIZE(ranges_24); - break; - case 19200: - case 38400: - ranges = ranges_19_38; - len = ARRAY_SIZE(ranges_19_38); + /* Figure out what CD2X divider we should be using for this cdclk */ + switch (DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.vco, + dev_priv->cdclk.hw.cdclk)) { + case 2: + expected |= BXT_CDCLK_CD2X_DIV_SEL_1; break; - } - - for (i = 0; i < len; i++) { - if (min_cdclk <= ranges[i]) - return ranges[i]; - } - - WARN_ON(min_cdclk > ranges[len - 1]); - return ranges[len - 1]; -} - -static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) -{ - int ratio; - - if (cdclk == dev_priv->cdclk.hw.bypass) - return 0; - - switch (cdclk) { - default: - MISSING_CASE(cdclk); - /* fall through */ - case 172800: - case 307200: - case 556800: - case 652800: - WARN_ON(dev_priv->cdclk.hw.ref != 19200 && - dev_priv->cdclk.hw.ref != 38400); + case 3: + expected |= BXT_CDCLK_CD2X_DIV_SEL_1_5; break; - case 180000: - case 312000: - case 552000: - case 648000: - WARN_ON(dev_priv->cdclk.hw.ref != 24000); + case 4: + expected |= BXT_CDCLK_CD2X_DIV_SEL_2; break; - case 192000: - WARN_ON(dev_priv->cdclk.hw.ref != 19200 && - dev_priv->cdclk.hw.ref != 38400 && - dev_priv->cdclk.hw.ref != 24000); + case 8: + expected |= BXT_CDCLK_CD2X_DIV_SEL_4; break; - } - - ratio = cdclk / (dev_priv->cdclk.hw.ref / 2); - - return dev_priv->cdclk.hw.ref * ratio; -} - -static void icl_set_cdclk(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *cdclk_state, - enum pipe pipe) -{ - unsigned int cdclk = cdclk_state->cdclk; - unsigned int vco = cdclk_state->vco; - int ret; - - ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, - SKL_CDCLK_PREPARE_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, - SKL_CDCLK_READY_FOR_CHANGE, 3); - if (ret) { - DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", - ret); - return; - } - - if (dev_priv->cdclk.hw.vco != 0 && - dev_priv->cdclk.hw.vco != vco) - cnl_cdclk_pll_disable(dev_priv); - - if (dev_priv->cdclk.hw.vco != vco) - cnl_cdclk_pll_enable(dev_priv, vco); - - /* - * On ICL CD2X_DIV can only be 1, so we'll never end up changing the - * divider here synchronized to a pipe while CDCLK is on, nor will we - * need the corresponding vblank wait. 
- */ - I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE | - skl_cdclk_decimal(cdclk)); - - sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, - cdclk_state->voltage_level); - - intel_update_cdclk(dev_priv); - - /* - * Can't read out the voltage level :( - * Let's just assume everything is as expected. - */ - dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level; -} - -static u8 icl_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk) -{ - if (IS_ELKHARTLAKE(dev_priv)) { - if (cdclk > 312000) - return 2; - else if (cdclk > 180000) - return 1; - else - return 0; - } else { - if (cdclk > 556800) - return 2; - else if (cdclk > 312000) - return 1; - else - return 0; - } -} - -static void icl_get_cdclk(struct drm_i915_private *dev_priv, - struct intel_cdclk_state *cdclk_state) -{ - u32 val; - - cdclk_state->bypass = 50000; - - val = I915_READ(SKL_DSSM); - switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) { default: - MISSING_CASE(val); - /* fall through */ - case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: - cdclk_state->ref = 24000; - break; - case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz: - cdclk_state->ref = 19200; - break; - case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz: - cdclk_state->ref = 38400; - break; - } - - val = I915_READ(BXT_DE_PLL_ENABLE); - if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 || - (val & BXT_DE_PLL_LOCK) == 0) { - /* - * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but - * setting it to zero is a way to signal that. - */ - cdclk_state->vco = 0; - cdclk_state->cdclk = cdclk_state->bypass; - goto out; + goto sanitize; } - cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref; - - val = I915_READ(CDCLK_CTL); - WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0); - - cdclk_state->cdclk = cdclk_state->vco / 2; - -out: /* - * Can't read this out :( Let's assume it's - * at least what the CDCLK frequency requires. + * Disable SSA Precharge when CD clock frequency < 500 MHz, + * enable otherwise. */ - cdclk_state->voltage_level = - icl_calc_voltage_level(dev_priv, cdclk_state->cdclk); -} - -static void icl_init_cdclk(struct drm_i915_private *dev_priv) -{ - struct intel_cdclk_state sanitized_state; - u32 val; - - /* This sets dev_priv->cdclk.hw. */ - intel_update_cdclk(dev_priv); - intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); - - /* This means CDCLK disabled. 
*/ - if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass) - goto sanitize; - - val = I915_READ(CDCLK_CTL); - - if ((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0) - goto sanitize; - - if ((val & CDCLK_FREQ_DECIMAL_MASK) != - skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk)) - goto sanitize; + if (IS_GEN9_LP(dev_priv) && dev_priv->cdclk.hw.cdclk >= 500000) + expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE; - return; + if (cdctl == expected) + /* All well; nothing to sanitize */ + return; sanitize: DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); - sanitized_state.ref = dev_priv->cdclk.hw.ref; - sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref); - sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv, - sanitized_state.cdclk); - sanitized_state.voltage_level = - icl_calc_voltage_level(dev_priv, - sanitized_state.cdclk); - - icl_set_cdclk(dev_priv, &sanitized_state, INVALID_PIPE); -} - -static void icl_uninit_cdclk(struct drm_i915_private *dev_priv) -{ - struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw; - - cdclk_state.cdclk = cdclk_state.bypass; - cdclk_state.vco = 0; - cdclk_state.voltage_level = icl_calc_voltage_level(dev_priv, - cdclk_state.cdclk); + /* force cdclk programming */ + dev_priv->cdclk.hw.cdclk = 0; - icl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); + /* force full PLL disable + enable */ + dev_priv->cdclk.hw.vco = -1; } -static void cnl_init_cdclk(struct drm_i915_private *dev_priv) +static void bxt_init_cdclk(struct drm_i915_private *dev_priv) { struct intel_cdclk_state cdclk_state; - cnl_sanitize_cdclk(dev_priv); + bxt_sanitize_cdclk(dev_priv); if (dev_priv->cdclk.hw.cdclk != 0 && dev_priv->cdclk.hw.vco != 0) @@ -1990,22 +1691,29 @@ static void cnl_init_cdclk(struct drm_i915_private *dev_priv) cdclk_state = dev_priv->cdclk.hw; - cdclk_state.cdclk = cnl_calc_cdclk(0); - cdclk_state.vco = cnl_cdclk_pll_vco(dev_priv, cdclk_state.cdclk); - cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk); + /* + * FIXME: + * - The initial CDCLK needs to be read from VBT. + * Need to make this change after VBT has changes for BXT. 
+ */ + cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0); + cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk); + cdclk_state.voltage_level = + dev_priv->display.calc_voltage_level(cdclk_state.cdclk); - cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); + bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } -static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv) +static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv) { struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw; cdclk_state.cdclk = cdclk_state.bypass; cdclk_state.vco = 0; - cdclk_state.voltage_level = cnl_calc_voltage_level(cdclk_state.cdclk); + cdclk_state.voltage_level = + dev_priv->display.calc_voltage_level(cdclk_state.cdclk); - cnl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); + bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE); } /** @@ -2019,14 +1727,10 @@ static void cnl_uninit_cdclk(struct drm_i915_private *dev_priv) */ void intel_cdclk_init(struct drm_i915_private *i915) { - if (INTEL_GEN(i915) >= 11) - icl_init_cdclk(i915); - else if (IS_CANNONLAKE(i915)) - cnl_init_cdclk(i915); + if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10) + bxt_init_cdclk(i915); else if (IS_GEN9_BC(i915)) skl_init_cdclk(i915); - else if (IS_GEN9_LP(i915)) - bxt_init_cdclk(i915); } /** @@ -2038,14 +1742,10 @@ void intel_cdclk_init(struct drm_i915_private *i915) */ void intel_cdclk_uninit(struct drm_i915_private *i915) { - if (INTEL_GEN(i915) >= 11) - icl_uninit_cdclk(i915); - else if (IS_CANNONLAKE(i915)) - cnl_uninit_cdclk(i915); + if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915)) + bxt_uninit_cdclk(i915); else if (IS_GEN9_BC(i915)) skl_uninit_cdclk(i915); - else if (IS_GEN9_LP(i915)) - bxt_uninit_cdclk(i915); } /** @@ -2073,9 +1773,9 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a, * Returns: * True if the CDCLK states require just a cd2x divider update, false if not. */ -bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *a, - const struct intel_cdclk_state *b) +static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv, + const struct intel_cdclk_state *a, + const struct intel_cdclk_state *b) { /* Older hw doesn't have the capability */ if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv)) @@ -2094,8 +1794,8 @@ bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv, * Returns: * True if the CDCLK states don't match, false if they do. 
*/ -bool intel_cdclk_changed(const struct intel_cdclk_state *a, - const struct intel_cdclk_state *b) +static bool intel_cdclk_changed(const struct intel_cdclk_state *a, + const struct intel_cdclk_state *b) { return intel_cdclk_needs_modeset(a, b) || a->voltage_level != b->voltage_level; @@ -2200,9 +1900,11 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv, intel_set_cdclk(dev_priv, new_state, pipe); } -static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv, - int pixel_rate) +static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + int pixel_rate = crtc_state->pixel_rate; + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) return DIV_ROUND_UP(pixel_rate, 2); else if (IS_GEN(dev_priv, 9) || @@ -2210,10 +1912,25 @@ static int intel_pixel_rate_to_cdclk(struct drm_i915_private *dev_priv, return pixel_rate; else if (IS_CHERRYVIEW(dev_priv)) return DIV_ROUND_UP(pixel_rate * 100, 95); + else if (crtc_state->double_wide) + return DIV_ROUND_UP(pixel_rate * 100, 90 * 2); else return DIV_ROUND_UP(pixel_rate * 100, 90); } +static int intel_planes_min_cdclk(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_plane *plane; + int min_cdclk = 0; + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) + min_cdclk = max(crtc_state->min_cdclk[plane->id], min_cdclk); + + return min_cdclk; +} + int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = @@ -2223,7 +1940,7 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) if (!crtc_state->base.enable) return 0; - min_cdclk = intel_pixel_rate_to_cdclk(dev_priv, crtc_state->pixel_rate); + min_cdclk = intel_pixel_rate_to_cdclk(crtc_state); /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */ if (IS_BROADWELL(dev_priv) && hsw_crtc_state_ips_capable(crtc_state)) @@ -2282,6 +1999,9 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state) IS_GEMINILAKE(dev_priv)) min_cdclk = max(158400, min_cdclk); + /* Account for additional needs from the planes */ + min_cdclk = max(intel_planes_min_cdclk(crtc_state), min_cdclk); + if (min_cdclk > dev_priv->max_cdclk_freq) { DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n", min_cdclk, dev_priv->max_cdclk_freq); @@ -2303,11 +2023,20 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) sizeof(state->min_cdclk)); for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + int ret; + min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); if (min_cdclk < 0) return min_cdclk; + if (state->min_cdclk[i] == min_cdclk) + continue; + state->min_cdclk[i] = min_cdclk; + + ret = intel_atomic_lock_global_state(state); + if (ret) + return ret; } min_cdclk = state->cdclk.force_min_cdclk; @@ -2318,6 +2047,10 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) } /* + * Account for port clock min voltage level requirements. + * This only really does something on CNL+ but can be + * called on earlier platforms as well. + * * Note that this functions assumes that 0 is * the lowest voltage value, and higher values * correspond to increasingly higher voltages. 
@@ -2326,7 +2059,7 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state) * future platforms this code will need to be * adjusted. */ -static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state) +static int bxt_compute_min_voltage_level(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; @@ -2339,11 +2072,21 @@ static u8 cnl_compute_min_voltage_level(struct intel_atomic_state *state) sizeof(state->min_voltage_level)); for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + int ret; + if (crtc_state->base.enable) - state->min_voltage_level[i] = - crtc_state->min_voltage_level; + min_voltage_level = crtc_state->min_voltage_level; else - state->min_voltage_level[i] = 0; + min_voltage_level = 0; + + if (state->min_voltage_level[i] == min_voltage_level) + continue; + + state->min_voltage_level[i] = min_voltage_level; + + ret = intel_atomic_lock_global_state(state); + if (ret) + return ret; } min_voltage_level = 0; @@ -2369,7 +2112,7 @@ static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.logical.voltage_level = vlv_calc_voltage_level(dev_priv, cdclk); - if (!state->active_crtcs) { + if (!state->active_pipes) { cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); state->cdclk.actual.cdclk = cdclk; @@ -2400,7 +2143,7 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.logical.voltage_level = bdw_calc_voltage_level(cdclk); - if (!state->active_crtcs) { + if (!state->active_pipes) { cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk); state->cdclk.actual.cdclk = cdclk; @@ -2470,7 +2213,7 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state) state->cdclk.logical.voltage_level = skl_calc_voltage_level(cdclk); - if (!state->active_crtcs) { + if (!state->active_pipes) { cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco); state->cdclk.actual.vco = vco; @@ -2487,38 +2230,33 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state) static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - int min_cdclk, cdclk, vco; + int min_cdclk, min_voltage_level, cdclk, vco; min_cdclk = intel_compute_min_cdclk(state); if (min_cdclk < 0) return min_cdclk; - if (IS_GEMINILAKE(dev_priv)) { - cdclk = glk_calc_cdclk(min_cdclk); - vco = glk_de_pll_vco(dev_priv, cdclk); - } else { - cdclk = bxt_calc_cdclk(min_cdclk); - vco = bxt_de_pll_vco(dev_priv, cdclk); - } + min_voltage_level = bxt_compute_min_voltage_level(state); + if (min_voltage_level < 0) + return min_voltage_level; + + cdclk = bxt_calc_cdclk(dev_priv, min_cdclk); + vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); state->cdclk.logical.vco = vco; state->cdclk.logical.cdclk = cdclk; state->cdclk.logical.voltage_level = - bxt_calc_voltage_level(cdclk); - - if (!state->active_crtcs) { - if (IS_GEMINILAKE(dev_priv)) { - cdclk = glk_calc_cdclk(state->cdclk.force_min_cdclk); - vco = glk_de_pll_vco(dev_priv, cdclk); - } else { - cdclk = bxt_calc_cdclk(state->cdclk.force_min_cdclk); - vco = bxt_de_pll_vco(dev_priv, cdclk); - } + max_t(int, min_voltage_level, + dev_priv->display.calc_voltage_level(cdclk)); + + if (!state->active_pipes) { + cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk); + vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk); state->cdclk.actual.vco = vco; state->cdclk.actual.cdclk = cdclk; state->cdclk.actual.voltage_level = - bxt_calc_voltage_level(cdclk); + 
dev_priv->display.calc_voltage_level(cdclk); } else { state->cdclk.actual = state->cdclk.logical; } @@ -2526,70 +2264,138 @@ static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state) return 0; } -static int cnl_modeset_calc_cdclk(struct intel_atomic_state *state) +static int intel_modeset_all_pipes(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); - int min_cdclk, cdclk, vco; + struct intel_crtc *crtc; - min_cdclk = intel_compute_min_cdclk(state); - if (min_cdclk < 0) - return min_cdclk; + /* + * Add all pipes to the state, and force + * a modeset on all the active ones. + */ + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state; + int ret; - cdclk = cnl_calc_cdclk(min_cdclk); - vco = cnl_cdclk_pll_vco(dev_priv, cdclk); + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); - state->cdclk.logical.vco = vco; - state->cdclk.logical.cdclk = cdclk; - state->cdclk.logical.voltage_level = - max(cnl_calc_voltage_level(cdclk), - cnl_compute_min_voltage_level(state)); + if (!crtc_state->base.active || + drm_atomic_crtc_needs_modeset(&crtc_state->base)) + continue; - if (!state->active_crtcs) { - cdclk = cnl_calc_cdclk(state->cdclk.force_min_cdclk); - vco = cnl_cdclk_pll_vco(dev_priv, cdclk); + crtc_state->base.mode_changed = true; - state->cdclk.actual.vco = vco; - state->cdclk.actual.cdclk = cdclk; - state->cdclk.actual.voltage_level = - cnl_calc_voltage_level(cdclk); - } else { - state->cdclk.actual = state->cdclk.logical; + ret = drm_atomic_add_affected_connectors(&state->base, + &crtc->base); + if (ret) + return ret; + + ret = drm_atomic_add_affected_planes(&state->base, + &crtc->base); + if (ret) + return ret; + + crtc_state->update_planes |= crtc_state->active_planes; } return 0; } -static int icl_modeset_calc_cdclk(struct intel_atomic_state *state) +static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - unsigned int ref = state->cdclk.logical.ref; - int min_cdclk, cdclk, vco; + int min_cdclk; + /* + * We can't change the cdclk frequency, but we still want to + * check that the required minimum frequency doesn't exceed + * the actual cdclk frequency. 
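+ * (Editor's note, not part of the original commit: even on these
+ * fixed-cdclk platforms intel_compute_min_cdclk() below is still run,
+ * so a configuration whose minimum requirement exceeds
+ * dev_priv->max_cdclk_freq is reported as an error instead of being
+ * silently programmed.)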
+ */ min_cdclk = intel_compute_min_cdclk(state); if (min_cdclk < 0) return min_cdclk; - cdclk = icl_calc_cdclk(min_cdclk, ref); - vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk); + return 0; +} - state->cdclk.logical.vco = vco; - state->cdclk.logical.cdclk = cdclk; - state->cdclk.logical.voltage_level = - max(icl_calc_voltage_level(dev_priv, cdclk), - cnl_compute_min_voltage_level(state)); +int intel_modeset_calc_cdclk(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + enum pipe pipe; + int ret; - if (!state->active_crtcs) { - cdclk = icl_calc_cdclk(state->cdclk.force_min_cdclk, ref); - vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk); + ret = dev_priv->display.modeset_calc_cdclk(state); + if (ret) + return ret; - state->cdclk.actual.vco = vco; - state->cdclk.actual.cdclk = cdclk; - state->cdclk.actual.voltage_level = - icl_calc_voltage_level(dev_priv, cdclk); + /* + * Writes to dev_priv->cdclk.{actual,logical} must protected + * by holding all the crtc mutexes even if we don't end up + * touching the hardware + */ + if (intel_cdclk_changed(&dev_priv->cdclk.actual, + &state->cdclk.actual)) { + /* + * Also serialize commits across all crtcs + * if the actual hw needs to be poked. + */ + ret = intel_atomic_serialize_global_state(state); + if (ret) + return ret; + } else if (intel_cdclk_changed(&dev_priv->cdclk.logical, + &state->cdclk.logical)) { + ret = intel_atomic_lock_global_state(state); + if (ret) + return ret; } else { - state->cdclk.actual = state->cdclk.logical; + return 0; + } + + if (is_power_of_2(state->active_pipes) && + intel_cdclk_needs_cd2x_update(dev_priv, + &dev_priv->cdclk.actual, + &state->cdclk.actual)) { + struct intel_crtc *crtc; + struct intel_crtc_state *crtc_state; + + pipe = ilog2(state->active_pipes); + crtc = intel_get_crtc_for_pipe(dev_priv, pipe); + + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + if (drm_atomic_crtc_needs_modeset(&crtc_state->base)) + pipe = INVALID_PIPE; + } else { + pipe = INVALID_PIPE; + } + + if (pipe != INVALID_PIPE) { + state->cdclk.pipe = pipe; + + DRM_DEBUG_KMS("Can change cdclk with pipe %c active\n", + pipe_name(pipe)); + } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, + &state->cdclk.actual)) { + /* All pipes must be switched off while we change the cdclk. 
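+ * (Editor's note, not part of the original commit, for illustration: with
+ * only pipe B active, state->active_pipes == BIT(PIPE_B) == 0x2, which is a
+ * power of two, and ilog2(0x2) == 1 == PIPE_B, so a pure CD2X divider change
+ * above can be done with that single pipe running. With pipes A and B both
+ * active (0x3, not a power of two), or when the PLL frequency itself has to
+ * change, we end up here and every pipe is forced through a full modeset.)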
*/ + ret = intel_modeset_all_pipes(state); + if (ret) + return ret; + + state->cdclk.pipe = INVALID_PIPE; + + DRM_DEBUG_KMS("Modeset required for cdclk change\n"); } + DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n", + state->cdclk.logical.cdclk, + state->cdclk.actual.cdclk); + DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n", + state->cdclk.logical.voltage_level, + state->cdclk.actual.voltage_level); + return 0; } @@ -2809,15 +2615,29 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv) */ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) { - if (INTEL_GEN(dev_priv) >= 11) { - dev_priv->display.set_cdclk = icl_set_cdclk; - dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; + if (IS_ELKHARTLAKE(dev_priv)) { + dev_priv->display.set_cdclk = bxt_set_cdclk; + dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; + dev_priv->display.calc_voltage_level = ehl_calc_voltage_level; + dev_priv->cdclk.table = icl_cdclk_table; + } else if (INTEL_GEN(dev_priv) >= 11) { + dev_priv->display.set_cdclk = bxt_set_cdclk; + dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; + dev_priv->display.calc_voltage_level = icl_calc_voltage_level; + dev_priv->cdclk.table = icl_cdclk_table; } else if (IS_CANNONLAKE(dev_priv)) { - dev_priv->display.set_cdclk = cnl_set_cdclk; - dev_priv->display.modeset_calc_cdclk = cnl_modeset_calc_cdclk; + dev_priv->display.set_cdclk = bxt_set_cdclk; + dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; + dev_priv->display.calc_voltage_level = cnl_calc_voltage_level; + dev_priv->cdclk.table = cnl_cdclk_table; } else if (IS_GEN9_LP(dev_priv)) { dev_priv->display.set_cdclk = bxt_set_cdclk; dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk; + dev_priv->display.calc_voltage_level = bxt_calc_voltage_level; + if (IS_GEMINILAKE(dev_priv)) + dev_priv->cdclk.table = glk_cdclk_table; + else + dev_priv->cdclk.table = bxt_cdclk_table; } else if (IS_GEN9_BC(dev_priv)) { dev_priv->display.set_cdclk = skl_set_cdclk; dev_priv->display.modeset_calc_cdclk = skl_modeset_calc_cdclk; @@ -2830,13 +2650,11 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv) } else if (IS_VALLEYVIEW(dev_priv)) { dev_priv->display.set_cdclk = vlv_set_cdclk; dev_priv->display.modeset_calc_cdclk = vlv_modeset_calc_cdclk; + } else { + dev_priv->display.modeset_calc_cdclk = fixed_modeset_calc_cdclk; } - if (INTEL_GEN(dev_priv) >= 11) - dev_priv->display.get_cdclk = icl_get_cdclk; - else if (IS_CANNONLAKE(dev_priv)) - dev_priv->display.get_cdclk = cnl_get_cdclk; - else if (IS_GEN9_LP(dev_priv)) + if (INTEL_GEN(dev_priv) >= 10 || IS_GEN9_LP(dev_priv)) dev_priv->display.get_cdclk = bxt_get_cdclk; else if (IS_GEN9_BC(dev_priv)) dev_priv->display.get_cdclk = skl_get_cdclk; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index 4d6f7f5f8930..cf71394cc79c 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -15,6 +15,13 @@ struct intel_atomic_state; struct intel_cdclk_state; struct intel_crtc_state; +struct intel_cdclk_vals { + u16 refclk; + u32 cdclk; + u8 divider; /* CD2X divider * 2 */ + u8 ratio; +}; + int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state); void intel_cdclk_init(struct drm_i915_private *i915); void intel_cdclk_uninit(struct drm_i915_private *i915); @@ -22,13 +29,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv); void 
intel_update_max_cdclk(struct drm_i915_private *dev_priv); void intel_update_cdclk(struct drm_i915_private *dev_priv); void intel_update_rawclk(struct drm_i915_private *dev_priv); -bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv, - const struct intel_cdclk_state *a, - const struct intel_cdclk_state *b); bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a, const struct intel_cdclk_state *b); -bool intel_cdclk_changed(const struct intel_cdclk_state *a, - const struct intel_cdclk_state *b); void intel_cdclk_swap_state(struct intel_atomic_state *state); void intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv, @@ -42,5 +44,6 @@ intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv, enum pipe pipe); void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state, const char *context); +int intel_modeset_calc_cdclk(struct intel_atomic_state *state); #endif /* __INTEL_CDCLK_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c index aa1e2c670bc4..aa3a063549c3 100644 --- a/drivers/gpu/drm/i915/display/intel_color.c +++ b/drivers/gpu/drm/i915/display/intel_color.c @@ -43,6 +43,21 @@ #define LEGACY_LUT_LENGTH 256 /* + * ILK+ csc matrix: + * + * |R/Cr| | c0 c1 c2 | ( |R/Cr| |preoff0| ) |postoff0| + * |G/Y | = | c3 c4 c5 | x ( |G/Y | + |preoff1| ) + |postoff1| + * |B/Cb| | c6 c7 c8 | ( |B/Cb| |preoff2| ) |postoff2| + * + * ILK/SNB don't have explicit post offsets, and instead + * CSC_MODE_YUV_TO_RGB and CSC_BLACK_SCREEN_OFFSET are used: + * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=0 -> 1/2, 0, 1/2 + * CSC_MODE_YUV_TO_RGB=0 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/2, 1/16, 1/2 + * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=0 -> 0, 0, 0 + * CSC_MODE_YUV_TO_RGB=1 + CSC_BLACK_SCREEN_OFFSET=1 -> 1/16, 1/16, 1/16 + */ + +/* * Extract the CSC coefficient from a CTM coefficient (in U32.32 fixed point * format). This macro takes the coefficient we want transformed and the * number of fractional bits. @@ -59,37 +74,38 @@ #define ILK_CSC_POSTOFF_LIMITED_RANGE (16 * (1 << 12) / 255) +/* Nop pre/post offsets */ static const u16 ilk_csc_off_zero[3] = {}; +/* Identity matrix */ static const u16 ilk_csc_coeff_identity[9] = { ILK_CSC_COEFF_1_0, 0, 0, 0, ILK_CSC_COEFF_1_0, 0, 0, 0, ILK_CSC_COEFF_1_0, }; +/* Limited range RGB post offsets */ static const u16 ilk_csc_postoff_limited_range[3] = { ILK_CSC_POSTOFF_LIMITED_RANGE, ILK_CSC_POSTOFF_LIMITED_RANGE, ILK_CSC_POSTOFF_LIMITED_RANGE, }; +/* Full range RGB -> limited range RGB matrix */ static const u16 ilk_csc_coeff_limited_range[9] = { ILK_CSC_COEFF_LIMITED_RANGE, 0, 0, 0, ILK_CSC_COEFF_LIMITED_RANGE, 0, 0, 0, ILK_CSC_COEFF_LIMITED_RANGE, }; -/* - * These values are direct register values specified in the Bspec, - * for RGB->YUV conversion matrix (colorspace BT709) - */ +/* BT.709 full range RGB -> limited range YCbCr matrix */ static const u16 ilk_csc_coeff_rgb_to_ycbcr[9] = { 0x1e08, 0x9cc0, 0xb528, 0x2ba8, 0x09d8, 0x37e8, 0xbce8, 0x9ad8, 0x1e08, }; -/* Post offset values for RGB->YCBCR conversion */ +/* Limited range YCbCr post offsets */ static const u16 ilk_csc_postoff_rgb_to_ycbcr[3] = { 0x0800, 0x0100, 0x0800, }; @@ -611,12 +627,13 @@ static void bdw_load_lut_10(struct intel_crtc *crtc, static void ivb_load_lut_ext_max(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; /* Program the max register to clamp values > 1.0. 
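 * (Editor's note, not part of the commit: the 1 << 16 written below reads as
 * 1.0 assuming the register uses 16 fractional bits, so any LUT output above
 * 1.0 is clamped to exactly 1.0.)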
*/ - I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16); - I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16); - I915_WRITE(PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16); + intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 0), 1 << 16); + intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 1), 1 << 16); + intel_dsb_reg_write(dsb, PREC_PAL_EXT_GC_MAX(pipe, 2), 1 << 16); /* * Program the gc max 2 register to clamp values > 1.0. @@ -624,10 +641,15 @@ static void ivb_load_lut_ext_max(struct intel_crtc *crtc) * from 3.0 to 7.0 */ if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { - I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 0), 1 << 16); - I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 1), 1 << 16); - I915_WRITE(PREC_PAL_EXT2_GC_MAX(pipe, 2), 1 << 16); + intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 0), + 1 << 16); + intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 1), + 1 << 16); + intel_dsb_reg_write(dsb, PREC_PAL_EXT2_GC_MAX(pipe, 2), + 1 << 16); } + + intel_dsb_put(dsb); } static void ivb_load_luts(const struct intel_crtc_state *crtc_state) @@ -787,78 +809,83 @@ icl_load_gcmax(const struct intel_crtc_state *crtc_state, const struct drm_color_lut *color) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; /* Fixme: LUT entries are 16 bit only, so we can prog 0xFFFF max */ - I915_WRITE(PREC_PAL_GC_MAX(pipe, 0), color->red); - I915_WRITE(PREC_PAL_GC_MAX(pipe, 1), color->green); - I915_WRITE(PREC_PAL_GC_MAX(pipe, 2), color->blue); + intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 0), color->red); + intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 1), color->green); + intel_dsb_reg_write(dsb, PREC_PAL_GC_MAX(pipe, 2), color->blue); + intel_dsb_put(dsb); } static void icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_property_blob *blob = crtc_state->base.gamma_lut; const struct drm_color_lut *lut = blob->data; + struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; u32 i; /* - * Every entry in the multi-segment LUT is corresponding to a superfine - * segment step which is 1/(8 * 128 * 256). + * Program Super Fine segment (let's call it seg1)... * - * Superfine segment has 9 entries, corresponding to values - * 0, 1/(8 * 128 * 256), 2/(8 * 128 * 256) .... 8/(8 * 128 * 256). + * Super Fine segment's step is 1/(8 * 128 * 256) and it has + * 9 entries, corresponding to values 0, 1/(8 * 128 * 256), + * 2/(8 * 128 * 256) ... 8/(8 * 128 * 256). 
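+ * (Editor's note, not part of the original commit: 8 * 128 * 256 = 262144,
+ * so seg1 only spans inputs from 0 to 8/262144, roughly 0.003% of full
+ * scale, i.e. the extreme dark end of the curve where the extra precision
+ * matters most.)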
*/ - I915_WRITE(PREC_PAL_MULTI_SEG_INDEX(pipe), PAL_PREC_AUTO_INCREMENT); + intel_dsb_reg_write(dsb, PREC_PAL_MULTI_SEG_INDEX(pipe), + PAL_PREC_AUTO_INCREMENT); for (i = 0; i < 9; i++) { const struct drm_color_lut *entry = &lut[i]; - I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe), - ilk_lut_12p4_ldw(entry)); - I915_WRITE(PREC_PAL_MULTI_SEG_DATA(pipe), - ilk_lut_12p4_udw(entry)); + intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe), + ilk_lut_12p4_ldw(entry)); + intel_dsb_indexed_reg_write(dsb, PREC_PAL_MULTI_SEG_DATA(pipe), + ilk_lut_12p4_udw(entry)); } + + intel_dsb_put(dsb); } static void icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); const struct drm_property_blob *blob = crtc_state->base.gamma_lut; const struct drm_color_lut *lut = blob->data; const struct drm_color_lut *entry; + struct intel_dsb *dsb = intel_dsb_get(crtc); enum pipe pipe = crtc->pipe; u32 i; /* - * * Program Fine segment (let's call it seg2)... * - * Fine segment's step is 1/(128 * 256) ie 1/(128 * 256), 2/(128*256) - * ... 256/(128*256). So in order to program fine segment of LUT we - * need to pick every 8'th entry in LUT, and program 256 indexes. + * Fine segment's step is 1/(128 * 256) i.e. 1/(128 * 256), 2/(128 * 256) + * ... 256/(128 * 256). So in order to program fine segment of LUT we + * need to pick every 8th entry in the LUT, and program 256 indexes. * * PAL_PREC_INDEX[0] and PAL_PREC_INDEX[1] map to seg2[1], - * with seg2[0] being unused by the hardware. + * seg2[0] being unused by the hardware. */ - I915_WRITE(PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT); + intel_dsb_reg_write(dsb, PREC_PAL_INDEX(pipe), PAL_PREC_AUTO_INCREMENT); for (i = 1; i < 257; i++) { entry = &lut[i * 8]; - I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry)); - I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry)); + intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), + ilk_lut_12p4_ldw(entry)); + intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), + ilk_lut_12p4_udw(entry)); } /* * Program Coarse segment (let's call it seg3)... * - * Coarse segment's starts from index 0 and it's step is 1/256 ie 0, - * 1/256, 2/256 ...256/256. As per the description of each entry in LUT + * Coarse segment starts from index 0 and it's step is 1/256 ie 0, + * 1/256, 2/256 ... 256/256. As per the description of each entry in LUT * above, we need to pick every (8 * 128)th entry in LUT, and * program 256 of those. 
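+ * (Editor's note, not part of the original commit: "every (8 * 128)th" means
+ * every 1024th entry, i.e. lut[0], lut[1024], ..., lut[255 * 1024]; the very
+ * last point, lut[256 * 8 * 128], is not written in this loop but is
+ * programmed into GCMAX by icl_load_gcmax() further down.)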
* @@ -868,20 +895,24 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state) */ for (i = 0; i < 256; i++) { entry = &lut[i * 8 * 128]; - I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_ldw(entry)); - I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_12p4_udw(entry)); + intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), + ilk_lut_12p4_ldw(entry)); + intel_dsb_indexed_reg_write(dsb, PREC_PAL_DATA(pipe), + ilk_lut_12p4_udw(entry)); } /* The last entry in the LUT is to be programmed in GCMAX */ entry = &lut[256 * 8 * 128]; icl_load_gcmax(crtc_state, entry); ivb_load_lut_ext_max(crtc); + intel_dsb_put(dsb); } static void icl_load_luts(const struct intel_crtc_state *crtc_state) { const struct drm_property_blob *gamma_lut = crtc_state->base.gamma_lut; struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct intel_dsb *dsb = intel_dsb_get(crtc); if (crtc_state->base.degamma_lut) glk_load_degamma_lut(crtc_state); @@ -890,16 +921,17 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state) case GAMMA_MODE_MODE_8BIT: i9xx_load_luts(crtc_state); break; - case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED: icl_program_gamma_superfine_segment(crtc_state); icl_program_gamma_multi_segment(crtc_state); break; - default: bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0)); ivb_load_lut_ext_max(crtc); } + + intel_dsb_commit(dsb); + intel_dsb_put(dsb); } static u32 chv_cgm_degamma_ldw(const struct drm_color_lut *color) @@ -1250,6 +1282,21 @@ static u32 ilk_gamma_mode(const struct intel_crtc_state *crtc_state) return GAMMA_MODE_MODE_10BIT; } +static u32 ilk_csc_mode(const struct intel_crtc_state *crtc_state) +{ + /* + * CSC comes after the LUT in RGB->YCbCr mode. + * RGB->YCbCr needs the limited range offsets added to + * the output. RGB limited range output is handled by + * the hw automagically elsewhere. + */ + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) + return CSC_BLACK_SCREEN_OFFSET; + + return CSC_MODE_YUV_TO_RGB | + CSC_POSITION_BEFORE_GAMMA; +} + static int ilk_color_check(struct intel_crtc_state *crtc_state) { int ret; @@ -1263,15 +1310,15 @@ static int ilk_color_check(struct intel_crtc_state *crtc_state) !crtc_state->c8_planes; /* - * We don't expose the ctm on ilk/snb currently, - * nor do we enable YCbCr output. Also RGB limited - * range output is handled by the hw automagically. + * We don't expose the ctm on ilk/snb currently, also RGB + * limited range output is handled by the hw automagically. 
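+ * (Editor's note, not part of the original commit: with this change the pipe
+ * CSC is enabled only for YCbCr output formats, where, per the comments
+ * above, it is used for the RGB->YCbCr conversion; plain RGB output keeps
+ * csc_enable false as before.)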
*/ - crtc_state->csc_enable = false; + crtc_state->csc_enable = + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB; crtc_state->gamma_mode = ilk_gamma_mode(crtc_state); - crtc_state->csc_mode = 0; + crtc_state->csc_mode = ilk_csc_mode(crtc_state); ret = intel_color_add_affected_planes(crtc_state); if (ret) @@ -1432,6 +1479,403 @@ static int icl_color_check(struct intel_crtc_state *crtc_state) return 0; } +static int i9xx_gamma_precision(const struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->gamma_enable) + return 0; + + switch (crtc_state->gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + return 8; + case GAMMA_MODE_MODE_10BIT: + return 16; + default: + MISSING_CASE(crtc_state->gamma_mode); + return 0; + } +} + +static int ilk_gamma_precision(const struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->gamma_enable) + return 0; + + if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0) + return 0; + + switch (crtc_state->gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + return 8; + case GAMMA_MODE_MODE_10BIT: + return 10; + default: + MISSING_CASE(crtc_state->gamma_mode); + return 0; + } +} + +static int chv_gamma_precision(const struct intel_crtc_state *crtc_state) +{ + if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) + return 10; + else + return i9xx_gamma_precision(crtc_state); +} + +static int glk_gamma_precision(const struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->gamma_enable) + return 0; + + switch (crtc_state->gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + return 8; + case GAMMA_MODE_MODE_10BIT: + return 10; + default: + MISSING_CASE(crtc_state->gamma_mode); + return 0; + } +} + +int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + if (HAS_GMCH(dev_priv)) { + if (IS_CHERRYVIEW(dev_priv)) + return chv_gamma_precision(crtc_state); + else + return i9xx_gamma_precision(crtc_state); + } else { + if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + return glk_gamma_precision(crtc_state); + else if (IS_IRONLAKE(dev_priv)) + return ilk_gamma_precision(crtc_state); + } + + return 0; +} + +static bool err_check(struct drm_color_lut *lut1, + struct drm_color_lut *lut2, u32 err) +{ + return ((abs((long)lut2->red - lut1->red)) <= err) && + ((abs((long)lut2->blue - lut1->blue)) <= err) && + ((abs((long)lut2->green - lut1->green)) <= err); +} + +static bool intel_color_lut_entry_equal(struct drm_color_lut *lut1, + struct drm_color_lut *lut2, + int lut_size, u32 err) +{ + int i; + + for (i = 0; i < lut_size; i++) { + if (!err_check(&lut1[i], &lut2[i], err)) + return false; + } + + return true; +} + +bool intel_color_lut_equal(struct drm_property_blob *blob1, + struct drm_property_blob *blob2, + u32 gamma_mode, u32 bit_precision) +{ + struct drm_color_lut *lut1, *lut2; + int lut_size1, lut_size2; + u32 err; + + if (!blob1 != !blob2) + return false; + + if (!blob1) + return true; + + lut_size1 = drm_color_lut_size(blob1); + lut_size2 = drm_color_lut_size(blob2); + + /* check sw and hw lut size */ + switch (gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + case GAMMA_MODE_MODE_10BIT: + if (lut_size1 != lut_size2) + return false; + break; + default: + MISSING_CASE(gamma_mode); + return false; + } + + lut1 = blob1->data; + lut2 = blob2->data; + + err = 0xffff >> bit_precision; + + /* check sw and hw lut entry to be equal */ + switch (gamma_mode) { + case GAMMA_MODE_MODE_8BIT: + case GAMMA_MODE_MODE_10BIT: + if 
(!intel_color_lut_entry_equal(lut1, lut2, + lut_size2, err)) + return false; + break; + default: + MISSING_CASE(gamma_mode); + return false; + } + + return true; +} + +/* convert hw value with given bit_precision to lut property val */ +static u32 intel_color_lut_pack(u32 val, u32 bit_precision) +{ + u32 max = 0xffff >> (16 - bit_precision); + + val = clamp_val(val, 0, max); + + if (bit_precision < 16) + val <<= 16 - bit_precision; + + return val; +} + +static struct drm_property_blob * +i9xx_read_lut_8(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *blob_data; + u32 i, val; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH, + NULL); + if (IS_ERR(blob)) + return NULL; + + blob_data = blob->data; + + for (i = 0; i < LEGACY_LUT_LENGTH; i++) { + if (HAS_GMCH(dev_priv)) + val = I915_READ(PALETTE(pipe, i)); + else + val = I915_READ(LGC_PALETTE(pipe, i)); + + blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( + LGC_PALETTE_RED_MASK, val), 8); + blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( + LGC_PALETTE_GREEN_MASK, val), 8); + blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( + LGC_PALETTE_BLUE_MASK, val), 8); + } + + return blob; +} + +static void i9xx_read_luts(struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->gamma_enable) + return; + + crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); +} + +static struct drm_property_blob * +i965_read_lut_10p6(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *blob_data; + u32 i, val1, val2; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * lut_size, + NULL); + if (IS_ERR(blob)) + return NULL; + + blob_data = blob->data; + + for (i = 0; i < lut_size - 1; i++) { + val1 = I915_READ(PALETTE(pipe, 2 * i + 0)); + val2 = I915_READ(PALETTE(pipe, 2 * i + 1)); + + blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 | + REG_FIELD_GET(PALETTE_RED_MASK, val1); + blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 | + REG_FIELD_GET(PALETTE_GREEN_MASK, val1); + blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 | + REG_FIELD_GET(PALETTE_BLUE_MASK, val1); + } + + blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK, + I915_READ(PIPEGCMAX(pipe, 0))); + blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK, + I915_READ(PIPEGCMAX(pipe, 1))); + blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK, + I915_READ(PIPEGCMAX(pipe, 2))); + + return blob; +} + +static void i965_read_luts(struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->gamma_enable) + return; + + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); + else + crtc_state->base.gamma_lut = i965_read_lut_10p6(crtc_state); +} + +static struct drm_property_blob * +chv_read_cgm_lut(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + 
enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *blob_data; + u32 i, val; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * lut_size, + NULL); + if (IS_ERR(blob)) + return NULL; + + blob_data = blob->data; + + for (i = 0; i < lut_size; i++) { + val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 0)); + blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( + CGM_PIPE_GAMMA_GREEN_MASK, val), 10); + blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( + CGM_PIPE_GAMMA_BLUE_MASK, val), 10); + + val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 1)); + blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( + CGM_PIPE_GAMMA_RED_MASK, val), 10); + } + + return blob; +} + +static void chv_read_luts(struct intel_crtc_state *crtc_state) +{ + if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA) + crtc_state->base.gamma_lut = chv_read_cgm_lut(crtc_state); + else + i965_read_luts(crtc_state); +} + +static struct drm_property_blob * +ilk_read_lut_10(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size; + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *blob_data; + u32 i, val; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * lut_size, + NULL); + if (IS_ERR(blob)) + return NULL; + + blob_data = blob->data; + + for (i = 0; i < lut_size; i++) { + val = I915_READ(PREC_PALETTE(pipe, i)); + + blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( + PREC_PALETTE_RED_MASK, val), 10); + blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( + PREC_PALETTE_GREEN_MASK, val), 10); + blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( + PREC_PALETTE_BLUE_MASK, val), 10); + } + + return blob; +} + +static void ilk_read_luts(struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->gamma_enable) + return; + + if ((crtc_state->csc_mode & CSC_POSITION_BEFORE_GAMMA) == 0) + return; + + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); + else + crtc_state->base.gamma_lut = ilk_read_lut_10(crtc_state); +} + +static struct drm_property_blob * +glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + int hw_lut_size = ivb_lut_10_size(prec_index); + enum pipe pipe = crtc->pipe; + struct drm_property_blob *blob; + struct drm_color_lut *blob_data; + u32 i, val; + + blob = drm_property_create_blob(&dev_priv->drm, + sizeof(struct drm_color_lut) * hw_lut_size, + NULL); + if (IS_ERR(blob)) + return NULL; + + blob_data = blob->data; + + I915_WRITE(PREC_PAL_INDEX(pipe), prec_index | + PAL_PREC_AUTO_INCREMENT); + + for (i = 0; i < hw_lut_size; i++) { + val = I915_READ(PREC_PAL_DATA(pipe)); + + blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET( + PREC_PAL_DATA_RED_MASK, val), 10); + blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET( + PREC_PAL_DATA_GREEN_MASK, val), 10); + blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET( + PREC_PAL_DATA_BLUE_MASK, val), 10); + } + + I915_WRITE(PREC_PAL_INDEX(pipe), 0); + + return blob; +} + +static void glk_read_luts(struct intel_crtc_state *crtc_state) +{ + if (!crtc_state->gamma_enable) + return; + + if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) + 
crtc_state->base.gamma_lut = i9xx_read_lut_8(crtc_state); + else + crtc_state->base.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0)); +} + void intel_color_init(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1444,14 +1888,17 @@ void intel_color_init(struct intel_crtc *crtc) dev_priv->display.color_check = chv_color_check; dev_priv->display.color_commit = i9xx_color_commit; dev_priv->display.load_luts = chv_load_luts; + dev_priv->display.read_luts = chv_read_luts; } else if (INTEL_GEN(dev_priv) >= 4) { dev_priv->display.color_check = i9xx_color_check; dev_priv->display.color_commit = i9xx_color_commit; dev_priv->display.load_luts = i965_load_luts; + dev_priv->display.read_luts = i965_read_luts; } else { dev_priv->display.color_check = i9xx_color_check; dev_priv->display.color_commit = i9xx_color_commit; dev_priv->display.load_luts = i9xx_load_luts; + dev_priv->display.read_luts = i9xx_read_luts; } } else { if (INTEL_GEN(dev_priv) >= 11) @@ -1470,16 +1917,19 @@ void intel_color_init(struct intel_crtc *crtc) else dev_priv->display.color_commit = ilk_color_commit; - if (INTEL_GEN(dev_priv) >= 11) + if (INTEL_GEN(dev_priv) >= 11) { dev_priv->display.load_luts = icl_load_luts; - else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) + } else if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv)) { dev_priv->display.load_luts = glk_load_luts; - else if (INTEL_GEN(dev_priv) >= 8) + dev_priv->display.read_luts = glk_read_luts; + } else if (INTEL_GEN(dev_priv) >= 8) { dev_priv->display.load_luts = bdw_load_luts; - else if (INTEL_GEN(dev_priv) >= 7) + } else if (INTEL_GEN(dev_priv) >= 7) { dev_priv->display.load_luts = ivb_load_luts; - else + } else { dev_priv->display.load_luts = ilk_load_luts; + dev_priv->display.read_luts = ilk_read_luts; + } } drm_crtc_enable_color_mgmt(&crtc->base, diff --git a/drivers/gpu/drm/i915/display/intel_color.h b/drivers/gpu/drm/i915/display/intel_color.h index 057e8ac63555..173727aaa24d 100644 --- a/drivers/gpu/drm/i915/display/intel_color.h +++ b/drivers/gpu/drm/i915/display/intel_color.h @@ -6,13 +6,20 @@ #ifndef __INTEL_COLOR_H__ #define __INTEL_COLOR_H__ +#include <linux/types.h> + struct intel_crtc_state; struct intel_crtc; +struct drm_property_blob; void intel_color_init(struct intel_crtc *crtc); int intel_color_check(struct intel_crtc_state *crtc_state); void intel_color_commit(const struct intel_crtc_state *crtc_state); void intel_color_load_luts(const struct intel_crtc_state *crtc_state); void intel_color_get_config(struct intel_crtc_state *crtc_state); +int intel_color_get_gamma_bit_precision(const struct intel_crtc_state *crtc_state); +bool intel_color_lut_equal(struct drm_property_blob *blob1, + struct drm_property_blob *blob2, + u32 gamma_mode, u32 bit_precision); #endif /* __INTEL_COLOR_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 308ec63207ee..1133c4e97bb4 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -277,7 +277,22 @@ intel_attach_aspect_ratio_property(struct drm_connector *connector) void intel_attach_colorspace_property(struct drm_connector *connector) { - if (!drm_mode_create_colorspace_property(connector)) - drm_object_attach_property(&connector->base, - connector->colorspace_property, 0); + switch (connector->connector_type) { + case DRM_MODE_CONNECTOR_HDMIA: + case DRM_MODE_CONNECTOR_HDMIB: + if 
(drm_mode_create_hdmi_colorspace_property(connector)) + return; + break; + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_eDP: + if (drm_mode_create_dp_colorspace_property(connector)) + return; + break; + default: + DRM_DEBUG_KMS("Colorspace property not supported\n"); + return; + } + + drm_object_attach_property(&connector->base, + connector->colorspace_property, 0); } diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 0a08354a6183..39cc6d79dc85 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -844,7 +844,7 @@ load_detect: } /* for pre-945g platforms use load detect */ - ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx); + ret = intel_get_load_detect_pipe(connector, &tmp, ctx); if (ret > 0) { if (intel_crt_detect_ddc(connector)) status = connector_status_connected; @@ -1001,9 +1001,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv) crt->base.type = INTEL_OUTPUT_ANALOG; crt->base.cloneable = (1 << INTEL_OUTPUT_DVO) | (1 << INTEL_OUTPUT_HDMI); if (IS_I830(dev_priv)) - crt->base.crtc_mask = (1 << 0); + crt->base.pipe_mask = BIT(PIPE_A); else - crt->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + crt->base.pipe_mask = ~0; if (IS_GEN(dev_priv, 2)) connector->interlace_allowed = 0; diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 8eb2b3ec01ed..0d6e494b4508 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -45,6 +45,7 @@ #include "intel_lspcon.h" #include "intel_panel.h" #include "intel_psr.h" +#include "intel_sprite.h" #include "intel_tc.h" #include "intel_vdsc.h" @@ -586,6 +587,26 @@ static const struct icl_mg_phy_ddi_buf_trans icl_mg_phy_ddi_translations[] = { { 0x0, 0x00, 0x00 }, /* 3 0 */ }; +struct tgl_dkl_phy_ddi_buf_trans { + u32 dkl_vswing_control; + u32 dkl_preshoot_control; + u32 dkl_de_emphasis_control; +}; + +static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_ddi_translations[] = { + /* VS pre-emp Non-trans mV Pre-emph dB */ + { 0x7, 0x0, 0x00 }, /* 0 0 400mV 0 dB */ + { 0x5, 0x0, 0x03 }, /* 0 1 400mV 3.5 dB */ + { 0x2, 0x0, 0x0b }, /* 0 2 400mV 6 dB */ + { 0x0, 0x0, 0x19 }, /* 0 3 400mV 9.5 dB */ + { 0x5, 0x0, 0x00 }, /* 1 0 600mV 0 dB */ + { 0x2, 0x0, 0x03 }, /* 1 1 600mV 3.5 dB */ + { 0x0, 0x0, 0x14 }, /* 1 2 600mV 6 dB */ + { 0x2, 0x0, 0x00 }, /* 2 0 800mV 0 dB */ + { 0x0, 0x0, 0x0B }, /* 2 1 800mV 3.5 dB */ + { 0x0, 0x0, 0x00 }, /* 3 0 1200mV 0 dB HDMI default */ +}; + static const struct ddi_buf_trans * bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries) { @@ -872,7 +893,14 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(dev_priv) >= 12) { + if (intel_phy_is_combo(dev_priv, phy)) + icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, + 0, &n_entries); + else + n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations); + default_entry = n_entries - 1; + } else if (INTEL_GEN(dev_priv) == 11) { if (intel_phy_is_combo(dev_priv, phy)) icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI, 0, &n_entries); @@ -1049,6 +1077,8 @@ static u32 icl_pll_to_ddi_clk_sel(struct intel_encoder *encoder, case DPLL_ID_ICL_MGPLL2: case DPLL_ID_ICL_MGPLL3: case DPLL_ID_ICL_MGPLL4: + case DPLL_ID_TGL_MGPLL5: + case DPLL_ID_TGL_MGPLL6: return DDI_CLK_SEL_MG; } } @@ -1413,11 +1443,30 @@ 
static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv, ref_clock = dev_priv->cdclk.hw.ref; - m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; - m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; - m2_frac = (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) ? - (pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_FRAC_MASK) >> - MG_PLL_DIV0_FBDIV_FRAC_SHIFT : 0; + if (INTEL_GEN(dev_priv) >= 12) { + m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK; + m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT; + m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK; + + if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) { + m2_frac = pll_state->mg_pll_bias & + DKL_PLL_BIAS_FBDIV_FRAC_MASK; + m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT; + } else { + m2_frac = 0; + } + } else { + m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK; + m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK; + + if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) { + m2_frac = pll_state->mg_pll_div0 & + MG_PLL_DIV0_FBDIV_FRAC_MASK; + m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT; + } else { + m2_frac = 0; + } + } switch (pll_state->mg_clktop2_hsclkctl & MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) { @@ -1692,7 +1741,8 @@ static void intel_ddi_clock_get(struct intel_encoder *encoder, hsw_ddi_clock_get(encoder, pipe_config); } -void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) +void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -1704,44 +1754,50 @@ void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state) WARN_ON(transcoder_is_dsi(cpu_transcoder)); - temp = TRANS_MSA_SYNC_CLK; - - if (crtc_state->limited_color_range) - temp |= TRANS_MSA_CEA_RANGE; + temp = DP_MSA_MISC_SYNC_CLOCK; switch (crtc_state->pipe_bpp) { case 18: - temp |= TRANS_MSA_6_BPC; + temp |= DP_MSA_MISC_6_BPC; break; case 24: - temp |= TRANS_MSA_8_BPC; + temp |= DP_MSA_MISC_8_BPC; break; case 30: - temp |= TRANS_MSA_10_BPC; + temp |= DP_MSA_MISC_10_BPC; break; case 36: - temp |= TRANS_MSA_12_BPC; + temp |= DP_MSA_MISC_12_BPC; break; default: MISSING_CASE(crtc_state->pipe_bpp); break; } + /* nonsense combination */ + WARN_ON(crtc_state->limited_color_range && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); + + if (crtc_state->limited_color_range) + temp |= DP_MSA_MISC_COLOR_CEA_RGB; + /* * As per DP 1.2 spec section 2.3.4.3 while sending * YCBCR 444 signals we should program MSA MISC1/0 fields with - * colorspace information. The output colorspace encoding is BT601. + * colorspace information. */ if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) - temp |= TRANS_MSA_SAMPLING_444 | TRANS_MSA_CLRSP_YCBCR; + temp |= DP_MSA_MISC_COLOR_YCBCR_444_BT709; + /* * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication * of Color Encoding Format and Content Color Gamut] while sending - * YCBCR 420 signals we should program MSA MISC1 fields which - * indicate VSC SDP for the Pixel Encoding/Colorimetry Format. + * YCBCR 420, HDR BT.2020 signals we should program MSA MISC1 fields + * which indicate VSC SDP for the Pixel Encoding/Colorimetry Format. 
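+ * (Editor's note, not part of the original commit: per the comment above,
+ * the intel_dp_needs_vsc_sdp() check below now makes this decision, so the
+ * same MISC1 indication covers both YCbCr 4:2:0 output and HDR BT.2020
+ * colorimetry carried in the VSC SDP.)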
*/ - if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) - temp |= TRANS_MSA_USE_VSC_SDP; + if (intel_dp_needs_vsc_sdp(crtc_state, conn_state)) + temp |= DP_MSA_MISC_COLOR_VSC_SDP; + I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp); } @@ -1761,7 +1817,14 @@ void intel_ddi_set_vc_payload_alloc(const struct intel_crtc_state *crtc_state, I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); } -void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) +/* + * Returns the TRANS_DDI_FUNC_CTL value based on CRTC state. + * + * Only intended to be used by intel_ddi_enable_transcoder_func() and + * intel_ddi_config_transcoder_func(). + */ +static u32 +intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct intel_encoder *encoder = intel_ddi_get_crtc_encoder(crtc); @@ -1840,11 +1903,42 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { temp |= TRANS_DDI_MODE_SELECT_DP_MST; temp |= DDI_PORT_WIDTH(crtc_state->lane_count); + + if (INTEL_GEN(dev_priv) >= 12) + temp |= TRANS_DDI_MST_TRANSPORT_SELECT(crtc_state->cpu_transcoder); } else { temp |= TRANS_DDI_MODE_SELECT_DP_SST; temp |= DDI_PORT_WIDTH(crtc_state->lane_count); } + return temp; +} + +void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 temp; + + temp = intel_ddi_transcoder_func_reg_val_get(crtc_state); + I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); +} + +/* + * Same as intel_ddi_enable_transcoder_func(), but it does not set the enable + * bit. 
+ */ +static void +intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 temp; + + temp = intel_ddi_transcoder_func_reg_val_get(crtc_state); + temp &= ~TRANS_DDI_FUNC_ENABLE; I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp); } @@ -2045,18 +2139,20 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder, } if (!*pipe_mask) - DRM_DEBUG_KMS("No pipe for ddi port %c found\n", - port_name(port)); + DRM_DEBUG_KMS("No pipe for [ENCODER:%d:%s] found\n", + encoder->base.base.id, encoder->base.name); if (!mst_pipe_mask && hweight8(*pipe_mask) > 1) { - DRM_DEBUG_KMS("Multiple pipes for non DP-MST port %c (pipe_mask %02x)\n", - port_name(port), *pipe_mask); + DRM_DEBUG_KMS("Multiple pipes for [ENCODER:%d:%s] (pipe_mask %02x)\n", + encoder->base.base.id, encoder->base.name, + *pipe_mask); *pipe_mask = BIT(ffs(*pipe_mask) - 1); } if (mst_pipe_mask && mst_pipe_mask != *pipe_mask) - DRM_DEBUG_KMS("Conflicting MST and non-MST encoders for port %c (pipe_mask %02x mst_pipe_mask %02x)\n", - port_name(port), *pipe_mask, mst_pipe_mask); + DRM_DEBUG_KMS("Conflicting MST and non-MST state for [ENCODER:%d:%s] (pipe_mask %02x mst_pipe_mask %02x)\n", + encoder->base.base.id, encoder->base.name, + *pipe_mask, mst_pipe_mask); else *is_dp_mst = mst_pipe_mask; @@ -2066,8 +2162,9 @@ out: if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK | BXT_PHY_LANE_POWERDOWN_ACK | BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED) - DRM_ERROR("Port %c enabled but PHY powered down? " - "(PHY_CTL %08x)\n", port_name(port), tmp); + DRM_ERROR("[ENCODER:%d:%s] enabled but PHY powered down? " + "(PHY_CTL %08x)\n", encoder->base.base.id, + encoder->base.name, tmp); } intel_display_power_put(dev_priv, encoder->power_domain, wakeref); @@ -2138,7 +2235,7 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder, /* * VDSC power is needed when DSC is enabled */ - if (crtc_state->dsc_params.compression_enable) + if (crtc_state->dsc.compression_enable) intel_display_power_get(dev_priv, intel_dsc_power_domain(crtc_state)); } @@ -2269,7 +2366,13 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder) enum phy phy = intel_port_to_phy(dev_priv, port); int n_entries; - if (INTEL_GEN(dev_priv) >= 11) { + if (INTEL_GEN(dev_priv) >= 12) { + if (intel_phy_is_combo(dev_priv, phy)) + icl_get_combo_buf_trans(dev_priv, encoder->type, + intel_dp->link_rate, &n_entries); + else + n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations); + } else if (INTEL_GEN(dev_priv) == 11) { if (intel_phy_is_combo(dev_priv, phy)) icl_get_combo_buf_trans(dev_priv, encoder->type, intel_dp->link_rate, &n_entries); @@ -2583,7 +2686,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, u32 level) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; + enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); const struct icl_mg_phy_ddi_buf_trans *ddi_translations; u32 n_entries, val; int ln; @@ -2599,33 +2702,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. 
*/ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_LINK_PARAMS(ln, port)); + val = I915_READ(MG_TX1_LINK_PARAMS(ln, tc_port)); val &= ~CRI_USE_FS32; - I915_WRITE(MG_TX1_LINK_PARAMS(ln, port), val); + I915_WRITE(MG_TX1_LINK_PARAMS(ln, tc_port), val); - val = I915_READ(MG_TX2_LINK_PARAMS(ln, port)); + val = I915_READ(MG_TX2_LINK_PARAMS(ln, tc_port)); val &= ~CRI_USE_FS32; - I915_WRITE(MG_TX2_LINK_PARAMS(ln, port), val); + I915_WRITE(MG_TX2_LINK_PARAMS(ln, tc_port), val); } /* Program MG_TX_SWINGCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_SWINGCTRL(ln, port)); + val = I915_READ(MG_TX1_SWINGCTRL(ln, tc_port)); val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; val |= CRI_TXDEEMPH_OVERRIDE_17_12( ddi_translations[level].cri_txdeemph_override_17_12); - I915_WRITE(MG_TX1_SWINGCTRL(ln, port), val); + I915_WRITE(MG_TX1_SWINGCTRL(ln, tc_port), val); - val = I915_READ(MG_TX2_SWINGCTRL(ln, port)); + val = I915_READ(MG_TX2_SWINGCTRL(ln, tc_port)); val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK; val |= CRI_TXDEEMPH_OVERRIDE_17_12( ddi_translations[level].cri_txdeemph_override_17_12); - I915_WRITE(MG_TX2_SWINGCTRL(ln, port), val); + I915_WRITE(MG_TX2_SWINGCTRL(ln, tc_port), val); } /* Program MG_TX_DRVCTRL with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_DRVCTRL(ln, port)); + val = I915_READ(MG_TX1_DRVCTRL(ln, tc_port)); val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK); val |= CRI_TXDEEMPH_OVERRIDE_5_0( @@ -2633,9 +2736,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, CRI_TXDEEMPH_OVERRIDE_11_6( ddi_translations[level].cri_txdeemph_override_11_6) | CRI_TXDEEMPH_OVERRIDE_EN; - I915_WRITE(MG_TX1_DRVCTRL(ln, port), val); + I915_WRITE(MG_TX1_DRVCTRL(ln, tc_port), val); - val = I915_READ(MG_TX2_DRVCTRL(ln, port)); + val = I915_READ(MG_TX2_DRVCTRL(ln, tc_port)); val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK | CRI_TXDEEMPH_OVERRIDE_5_0_MASK); val |= CRI_TXDEEMPH_OVERRIDE_5_0( @@ -2643,7 +2746,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, CRI_TXDEEMPH_OVERRIDE_11_6( ddi_translations[level].cri_txdeemph_override_11_6) | CRI_TXDEEMPH_OVERRIDE_EN; - I915_WRITE(MG_TX2_DRVCTRL(ln, port), val); + I915_WRITE(MG_TX2_DRVCTRL(ln, tc_port), val); /* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */ } @@ -2654,17 +2757,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, * values from table for which TX1 and TX2 enabled. 
*/ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_CLKHUB(ln, port)); + val = I915_READ(MG_CLKHUB(ln, tc_port)); if (link_clock < 300000) val |= CFG_LOW_RATE_LKREN_EN; else val &= ~CFG_LOW_RATE_LKREN_EN; - I915_WRITE(MG_CLKHUB(ln, port), val); + I915_WRITE(MG_CLKHUB(ln, tc_port), val); } /* Program the MG_TX_DCC<LN, port being used> based on the link frequency */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_DCC(ln, port)); + val = I915_READ(MG_TX1_DCC(ln, tc_port)); val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; if (link_clock <= 500000) { val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; @@ -2672,9 +2775,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, val |= CFG_AMI_CK_DIV_OVERRIDE_EN | CFG_AMI_CK_DIV_OVERRIDE_VAL(1); } - I915_WRITE(MG_TX1_DCC(ln, port), val); + I915_WRITE(MG_TX1_DCC(ln, tc_port), val); - val = I915_READ(MG_TX2_DCC(ln, port)); + val = I915_READ(MG_TX2_DCC(ln, tc_port)); val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK; if (link_clock <= 500000) { val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN; @@ -2682,18 +2785,18 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, val |= CFG_AMI_CK_DIV_OVERRIDE_EN | CFG_AMI_CK_DIV_OVERRIDE_VAL(1); } - I915_WRITE(MG_TX2_DCC(ln, port), val); + I915_WRITE(MG_TX2_DCC(ln, tc_port), val); } /* Program MG_TX_PISO_READLOAD with values from vswing table */ for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_TX1_PISO_READLOAD(ln, port)); + val = I915_READ(MG_TX1_PISO_READLOAD(ln, tc_port)); val |= CRI_CALCINIT; - I915_WRITE(MG_TX1_PISO_READLOAD(ln, port), val); + I915_WRITE(MG_TX1_PISO_READLOAD(ln, tc_port), val); - val = I915_READ(MG_TX2_PISO_READLOAD(ln, port)); + val = I915_READ(MG_TX2_PISO_READLOAD(ln, tc_port)); val |= CRI_CALCINIT; - I915_WRITE(MG_TX2_PISO_READLOAD(ln, port), val); + I915_WRITE(MG_TX2_PISO_READLOAD(ln, tc_port), val); } } @@ -2711,6 +2814,64 @@ static void icl_ddi_vswing_sequence(struct intel_encoder *encoder, icl_mg_phy_ddi_vswing_sequence(encoder, link_clock, level); } +static void +tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock, + u32 level) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum tc_port tc_port = intel_port_to_tc(dev_priv, encoder->port); + const struct tgl_dkl_phy_ddi_buf_trans *ddi_translations; + u32 n_entries, val, ln, dpcnt_mask, dpcnt_val; + + n_entries = ARRAY_SIZE(tgl_dkl_phy_ddi_translations); + ddi_translations = tgl_dkl_phy_ddi_translations; + + if (level >= n_entries) + level = n_entries - 1; + + dpcnt_mask = (DKL_TX_PRESHOOT_COEFF_MASK | + DKL_TX_DE_EMPAHSIS_COEFF_MASK | + DKL_TX_VSWING_CONTROL_MASK); + dpcnt_val = DKL_TX_VSWING_CONTROL(ddi_translations[level].dkl_vswing_control); + dpcnt_val |= DKL_TX_DE_EMPHASIS_COEFF(ddi_translations[level].dkl_de_emphasis_control); + dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control); + + for (ln = 0; ln < 2; ln++) { + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); + + I915_WRITE(DKL_TX_PMD_LANE_SUS(tc_port), 0); + + /* All the registers are RMW */ + val = I915_READ(DKL_TX_DPCNTL0(tc_port)); + val &= ~dpcnt_mask; + val |= dpcnt_val; + I915_WRITE(DKL_TX_DPCNTL0(tc_port), val); + + val = I915_READ(DKL_TX_DPCNTL1(tc_port)); + val &= ~dpcnt_mask; + val |= dpcnt_val; + I915_WRITE(DKL_TX_DPCNTL1(tc_port), val); + + val = I915_READ(DKL_TX_DPCNTL2(tc_port)); + val &= ~DKL_TX_DP20BITMODE; + I915_WRITE(DKL_TX_DPCNTL2(tc_port), val); + } +} + +static void tgl_ddi_vswing_sequence(struct intel_encoder *encoder, + int link_clock, + u32 level, + 
enum intel_output_type type) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + + if (intel_phy_is_combo(dev_priv, phy)) + icl_combo_phy_ddi_vswing_sequence(encoder, level, type); + else + tgl_dkl_phy_ddi_vswing_sequence(encoder, link_clock, level); +} + static u32 translate_signal_level(int signal_levels) { int i; @@ -2742,7 +2903,10 @@ u32 bxt_signal_levels(struct intel_dp *intel_dp) struct intel_encoder *encoder = &dport->base; int level = intel_ddi_dp_level(intel_dp); - if (INTEL_GEN(dev_priv) >= 11) + if (INTEL_GEN(dev_priv) >= 12) + tgl_ddi_vswing_sequence(encoder, intel_dp->link_rate, + level, encoder->type); + else if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, intel_dp->link_rate, level, encoder->type); else if (IS_CANNONLAKE(dev_priv)) @@ -2989,130 +3153,141 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder) } } -static void icl_enable_phy_clock_gating(struct intel_digital_port *dig_port) +static void +icl_phy_set_clock_gating(struct intel_digital_port *dig_port, bool enable) { struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum port port = dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - u32 val; + enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port); + u32 val, bits; int ln; if (tc_port == PORT_TC_NONE) return; - for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_DP_MODE(ln, port)); - val |= MG_DP_MODE_CFG_TR2PWR_GATING | - MG_DP_MODE_CFG_TRPWR_GATING | - MG_DP_MODE_CFG_CLNPWR_GATING | - MG_DP_MODE_CFG_DIGPWR_GATING | - MG_DP_MODE_CFG_GAONPWR_GATING; - I915_WRITE(MG_DP_MODE(ln, port), val); - } + bits = MG_DP_MODE_CFG_TR2PWR_GATING | MG_DP_MODE_CFG_TRPWR_GATING | + MG_DP_MODE_CFG_CLNPWR_GATING | MG_DP_MODE_CFG_DIGPWR_GATING | + MG_DP_MODE_CFG_GAONPWR_GATING; - val = I915_READ(MG_MISC_SUS0(tc_port)); - val |= MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3) | - MG_MISC_SUS0_CFG_TR2PWR_GATING | - MG_MISC_SUS0_CFG_CL2PWR_GATING | - MG_MISC_SUS0_CFG_GAONPWR_GATING | - MG_MISC_SUS0_CFG_TRPWR_GATING | - MG_MISC_SUS0_CFG_CL1PWR_GATING | - MG_MISC_SUS0_CFG_DGPWR_GATING; - I915_WRITE(MG_MISC_SUS0(tc_port), val); -} - -static void icl_disable_phy_clock_gating(struct intel_digital_port *dig_port) -{ - struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); - enum port port = dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(dev_priv, port); - u32 val; - int ln; + for (ln = 0; ln < 2; ln++) { + if (INTEL_GEN(dev_priv) >= 12) { + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); + val = I915_READ(DKL_DP_MODE(tc_port)); + } else { + val = I915_READ(MG_DP_MODE(ln, tc_port)); + } - if (tc_port == PORT_TC_NONE) - return; + if (enable) + val |= bits; + else + val &= ~bits; - for (ln = 0; ln < 2; ln++) { - val = I915_READ(MG_DP_MODE(ln, port)); - val &= ~(MG_DP_MODE_CFG_TR2PWR_GATING | - MG_DP_MODE_CFG_TRPWR_GATING | - MG_DP_MODE_CFG_CLNPWR_GATING | - MG_DP_MODE_CFG_DIGPWR_GATING | - MG_DP_MODE_CFG_GAONPWR_GATING); - I915_WRITE(MG_DP_MODE(ln, port), val); + if (INTEL_GEN(dev_priv) >= 12) + I915_WRITE(DKL_DP_MODE(tc_port), val); + else + I915_WRITE(MG_DP_MODE(ln, tc_port), val); } - val = I915_READ(MG_MISC_SUS0(tc_port)); - val &= ~(MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK | - MG_MISC_SUS0_CFG_TR2PWR_GATING | - MG_MISC_SUS0_CFG_CL2PWR_GATING | - MG_MISC_SUS0_CFG_GAONPWR_GATING | - MG_MISC_SUS0_CFG_TRPWR_GATING | - MG_MISC_SUS0_CFG_CL1PWR_GATING | - MG_MISC_SUS0_CFG_DGPWR_GATING); 
- I915_WRITE(MG_MISC_SUS0(tc_port), val); + if (INTEL_GEN(dev_priv) == 11) { + bits = MG_MISC_SUS0_CFG_TR2PWR_GATING | + MG_MISC_SUS0_CFG_CL2PWR_GATING | + MG_MISC_SUS0_CFG_GAONPWR_GATING | + MG_MISC_SUS0_CFG_TRPWR_GATING | + MG_MISC_SUS0_CFG_CL1PWR_GATING | + MG_MISC_SUS0_CFG_DGPWR_GATING; + + val = I915_READ(MG_MISC_SUS0(tc_port)); + if (enable) + val |= (bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE(3)); + else + val &= ~(bits | MG_MISC_SUS0_SUSCLK_DYNCLKGATE_MODE_MASK); + I915_WRITE(MG_MISC_SUS0(tc_port), val); + } } -static void icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port) +static void +icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev); - enum port port = intel_dig_port->base.port; - u32 ln0, ln1, lane_mask; + enum tc_port tc_port = intel_port_to_tc(dev_priv, intel_dig_port->base.port); + u32 ln0, ln1, pin_assignment; + u8 width; if (intel_dig_port->tc_mode == TC_PORT_TBT_ALT) return; - ln0 = I915_READ(MG_DP_MODE(0, port)); - ln1 = I915_READ(MG_DP_MODE(1, port)); + if (INTEL_GEN(dev_priv) >= 12) { + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0)); + ln0 = I915_READ(DKL_DP_MODE(tc_port)); + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1)); + ln1 = I915_READ(DKL_DP_MODE(tc_port)); + } else { + ln0 = I915_READ(MG_DP_MODE(0, tc_port)); + ln1 = I915_READ(MG_DP_MODE(1, tc_port)); + } - switch (intel_dig_port->tc_mode) { - case TC_PORT_DP_ALT: - ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); - ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); + ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X1_MODE); + ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); - lane_mask = intel_tc_port_get_lane_mask(intel_dig_port); + /* DPPATC */ + pin_assignment = intel_tc_port_get_pin_assignment_mask(intel_dig_port); + width = crtc_state->lane_count; - switch (lane_mask) { - case 0x1: - case 0x4: - break; - case 0x2: + switch (pin_assignment) { + case 0x0: + WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY); + if (width == 1) { + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; + } else { + ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; + } + break; + case 0x1: + if (width == 4) { + ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; + } + break; + case 0x2: + if (width == 2) { + ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; + } + break; + case 0x3: + case 0x5: + if (width == 1) { ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; - break; - case 0x3: - ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | - MG_DP_MODE_CFG_DP_X2_MODE; - break; - case 0x8: ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; - break; - case 0xC: - ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | - MG_DP_MODE_CFG_DP_X2_MODE; - break; - case 0xF: - ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | - MG_DP_MODE_CFG_DP_X2_MODE; - ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | - MG_DP_MODE_CFG_DP_X2_MODE; - break; - default: - MISSING_CASE(lane_mask); + } else { + ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; } break; - - case TC_PORT_LEGACY: - ln0 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; - ln1 |= MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE; + case 0x4: + case 0x6: + if (width == 1) { + ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; + } else { + ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; + ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; + } break; - default: - 
MISSING_CASE(intel_dig_port->tc_mode); - return; + MISSING_CASE(pin_assignment); } - I915_WRITE(MG_DP_MODE(0, port), ln0); - I915_WRITE(MG_DP_MODE(1, port), ln1); + if (INTEL_GEN(dev_priv) >= 12) { + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0)); + I915_WRITE(DKL_DP_MODE(tc_port), ln0); + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1)); + I915_WRITE(DKL_DP_MODE(tc_port), ln1); + } else { + I915_WRITE(MG_DP_MODE(0, tc_port), ln0); + I915_WRITE(MG_DP_MODE(1, tc_port), ln1); + } } static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, @@ -3129,17 +3304,18 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; + struct intel_dp *intel_dp; u32 val; if (!crtc_state->fec_enable) return; - val = I915_READ(DP_TP_CTL(port)); + intel_dp = enc_to_intel_dp(&encoder->base); + val = I915_READ(intel_dp->regs.dp_tp_ctl); val |= DP_TP_CTL_FEC_ENABLE; - I915_WRITE(DP_TP_CTL(port), val); + I915_WRITE(intel_dp->regs.dp_tp_ctl, val); - if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port), + if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, DP_TP_STATUS_FEC_ENABLE_LIVE, 1)) DRM_ERROR("Timed out waiting for FEC Enable Status\n"); } @@ -3148,21 +3324,205 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = encoder->port; + struct intel_dp *intel_dp; u32 val; if (!crtc_state->fec_enable) return; - val = I915_READ(DP_TP_CTL(port)); + intel_dp = enc_to_intel_dp(&encoder->base); + val = I915_READ(intel_dp->regs.dp_tp_ctl); val &= ~DP_TP_CTL_FEC_ENABLE; - I915_WRITE(DP_TP_CTL(port), val); - POSTING_READ(DP_TP_CTL(port)); + I915_WRITE(intel_dp->regs.dp_tp_ctl, val); + POSTING_READ(intel_dp->regs.dp_tp_ctl); } -static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) +static void +tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate) +{ + struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + u32 val; + + if (!cstate->dc3co_exitline) + return; + + val = I915_READ(EXITLINE(cstate->cpu_transcoder)); + val &= ~(EXITLINE_MASK | EXITLINE_ENABLE); + I915_WRITE(EXITLINE(cstate->cpu_transcoder), val); +} + +static void +tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate) +{ + u32 val, exit_scanlines; + struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + + if (!cstate->dc3co_exitline) + return; + + exit_scanlines = cstate->dc3co_exitline; + exit_scanlines <<= EXITLINE_SHIFT; + val = I915_READ(EXITLINE(cstate->cpu_transcoder)); + val &= ~(EXITLINE_MASK | EXITLINE_ENABLE); + val |= exit_scanlines; + val |= EXITLINE_ENABLE; + I915_WRITE(EXITLINE(cstate->cpu_transcoder), val); +} + +static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder, + struct intel_crtc_state *cstate) +{ + u32 exit_scanlines; + struct drm_i915_private *dev_priv = to_i915(cstate->base.crtc->dev); + u32 crtc_vdisplay = cstate->base.adjusted_mode.crtc_vdisplay; + + cstate->dc3co_exitline = 0; + + if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO)) + return; + + /* B.Specs:49196 DC3CO only works with pipeA and DDIA.*/ + if (to_intel_crtc(cstate->base.crtc)->pipe != PIPE_A || + encoder->port != PORT_A) + 
return; + + if (!cstate->has_psr2 || !cstate->base.active) + return; + + /* + * DC3CO Exit time 200us B.Spec 49196 + * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1 + */ + exit_scanlines = + intel_usecs_to_scanlines(&cstate->base.adjusted_mode, 200) + 1; + + if (WARN_ON(exit_scanlines > crtc_vdisplay)) + return; + + cstate->dc3co_exitline = crtc_vdisplay - exit_scanlines; + DRM_DEBUG_KMS("DC3CO exit scanlines %d\n", cstate->dc3co_exitline); +} + +static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state) +{ + u32 val; + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + + if (INTEL_GEN(dev_priv) < 12) + return; + + val = I915_READ(EXITLINE(crtc_state->cpu_transcoder)); + + if (val & EXITLINE_ENABLE) + crtc_state->dc3co_exitline = val & EXITLINE_MASK; +} + +static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base); + bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); + int level = intel_ddi_dp_level(intel_dp); + enum transcoder transcoder = crtc_state->cpu_transcoder; + + tgl_set_psr2_transcoder_exitline(crtc_state); + intel_dp_set_link_params(intel_dp, crtc_state->port_clock, + crtc_state->lane_count, is_mst); + + intel_dp->regs.dp_tp_ctl = TGL_DP_TP_CTL(transcoder); + intel_dp->regs.dp_tp_status = TGL_DP_TP_STATUS(transcoder); + + /* 1.a got on intel_atomic_commit_tail() */ + + /* 2. */ + intel_edp_panel_on(intel_dp); + + /* + * 1.b, 3. and 4.a is done before tgl_ddi_pre_enable_dp() by: + * haswell_crtc_enable()->intel_encoders_pre_pll_enable() and + * haswell_crtc_enable()->intel_enable_shared_dpll() + */ + + /* 4.b */ + intel_ddi_clk_select(encoder, crtc_state); + + /* 5. */ + if (!intel_phy_is_tc(dev_priv, phy) || + dig_port->tc_mode != TC_PORT_TBT_ALT) + intel_display_power_get(dev_priv, + dig_port->ddi_io_power_domain); + + /* 6. 
*/ + icl_program_mg_dp_mode(dig_port, crtc_state); + + /* + * 7.a - Steps in this function should only be executed over MST + * master, what will be taken in care by MST hook + * intel_mst_pre_enable_dp() + */ + intel_ddi_enable_pipe_clock(crtc_state); + + /* 7.b */ + intel_ddi_config_transcoder_func(crtc_state); + + /* 7.d */ + icl_phy_set_clock_gating(dig_port, false); + + /* 7.e */ + tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, + encoder->type); + + /* 7.f */ + if (intel_phy_is_combo(dev_priv, phy)) { + bool lane_reversal = + dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + + intel_combo_phy_power_up_lanes(dev_priv, phy, false, + crtc_state->lane_count, + lane_reversal); + } + + /* 7.g */ + intel_ddi_init_dp_buf_reg(encoder); + + if (!is_mst) + intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); + + intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); + /* + * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit + * in the FEC_CONFIGURATION register to 1 before initiating link + * training + */ + intel_dp_sink_set_fec_ready(intel_dp, crtc_state); + /* 7.c, 7.h, 7.i, 7.j */ + intel_dp_start_link_train(intel_dp); + + /* 7.k */ + if (!is_trans_port_sync_mode(crtc_state)) + intel_dp_stop_link_train(intel_dp); + + /* + * TODO: enable clock gating + * + * It is not written in DP enabling sequence but "PHY Clockgating + * programming" states that clock gating should be enabled after the + * link training but doing so causes all the following trainings to fail + * so not enabling it for now. + */ + + /* 7.l */ + intel_ddi_enable_fec(encoder, crtc_state); + intel_dsc_enable(encoder, crtc_state); +} + +static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); @@ -3177,6 +3537,9 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_dp_set_link_params(intel_dp, crtc_state->port_clock, crtc_state->lane_count, is_mst); + intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port); + intel_dp->regs.dp_tp_status = DP_TP_STATUS(port); + intel_edp_panel_on(intel_dp); intel_ddi_clk_select(encoder, crtc_state); @@ -3186,8 +3549,8 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); - icl_program_mg_dp_mode(dig_port); - icl_disable_phy_clock_gating(dig_port); + icl_program_mg_dp_mode(dig_port, crtc_state); + icl_phy_set_clock_gating(dig_port, false); if (INTEL_GEN(dev_priv) >= 11) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, @@ -3215,12 +3578,13 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, true); intel_dp_sink_set_fec_ready(intel_dp, crtc_state); intel_dp_start_link_train(intel_dp); - if (port != PORT_A || INTEL_GEN(dev_priv) >= 9) + if ((port != PORT_A || INTEL_GEN(dev_priv) >= 9) && + !is_trans_port_sync_mode(crtc_state)) intel_dp_stop_link_train(intel_dp); intel_ddi_enable_fec(encoder, crtc_state); - icl_enable_phy_clock_gating(dig_port); + icl_phy_set_clock_gating(dig_port, true); if (!is_mst) intel_ddi_enable_pipe_clock(crtc_state); @@ -3228,6 +3592,24 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, intel_dsc_enable(encoder, crtc_state); } +static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state 
*conn_state) +{ + struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + + if (INTEL_GEN(dev_priv) >= 12) + tgl_ddi_pre_enable_dp(encoder, crtc_state, conn_state); + else + hsw_ddi_pre_enable_dp(encoder, crtc_state, conn_state); + + /* MST will call a setting of MSA after an allocating of Virtual Channel + * from MST encoder pre_enable callback. + */ + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) + intel_ddi_set_dp_msa(crtc_state, conn_state); +} + static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) @@ -3244,10 +3626,13 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, intel_display_power_get(dev_priv, dig_port->ddi_io_power_domain); - icl_program_mg_dp_mode(dig_port); - icl_disable_phy_clock_gating(dig_port); + icl_program_mg_dp_mode(dig_port, crtc_state); + icl_phy_set_clock_gating(dig_port, false); - if (INTEL_GEN(dev_priv) >= 11) + if (INTEL_GEN(dev_priv) >= 12) + tgl_ddi_vswing_sequence(encoder, crtc_state->port_clock, + level, INTEL_OUTPUT_HDMI); + else if (INTEL_GEN(dev_priv) == 11) icl_ddi_vswing_sequence(encoder, crtc_state->port_clock, level, INTEL_OUTPUT_HDMI); else if (IS_CANNONLAKE(dev_priv)) @@ -3257,7 +3642,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder, else intel_prepare_hdmi_ddi_buffers(encoder, level); - icl_enable_phy_clock_gating(dig_port); + icl_phy_set_clock_gating(dig_port, true); if (IS_GEN9_BC(dev_priv)) skl_ddi_set_iboost(encoder, level, INTEL_OUTPUT_HDMI); @@ -3330,10 +3715,14 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder, wait = true; } - val = I915_READ(DP_TP_CTL(port)); - val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); - val |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(DP_TP_CTL(port), val); + if (intel_crtc_has_dp_encoder(crtc_state)) { + struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); + + val = I915_READ(intel_dp->regs.dp_tp_ctl); + val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); + val |= DP_TP_CTL_LINK_TRAIN_PAT1; + I915_WRITE(intel_dp->regs.dp_tp_ctl, val); + } /* Disable FEC in DP Sink */ intel_ddi_disable_fec_state(encoder, crtc_state); @@ -3373,6 +3762,7 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder, dig_port->ddi_io_power_domain); intel_ddi_clk_disable(encoder); + tgl_clear_psr2_transcoder_exitline(old_crtc_state); } static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder, @@ -3475,7 +3865,8 @@ static void intel_enable_ddi_dp(struct intel_encoder *encoder, intel_edp_backlight_on(crtc_state, conn_state); intel_psr_enable(intel_dp, crtc_state); - intel_dp_ycbcr_420_enable(intel_dp, crtc_state); + intel_dp_vsc_enable(intel_dp, crtc_state, conn_state); + intel_dp_hdr_metadata_enable(intel_dp, crtc_state, conn_state); intel_edp_drrs_enable(intel_dp, crtc_state); if (crtc_state->has_audio) @@ -3486,12 +3877,12 @@ static i915_reg_t gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv, enum port port) { - static const i915_reg_t regs[] = { - [PORT_A] = CHICKEN_TRANS_EDP, - [PORT_B] = CHICKEN_TRANS_A, - [PORT_C] = CHICKEN_TRANS_B, - [PORT_D] = CHICKEN_TRANS_C, - [PORT_E] = CHICKEN_TRANS_A, + static const enum transcoder trans[] = { + [PORT_A] = TRANSCODER_EDP, + [PORT_B] = TRANSCODER_A, + [PORT_C] = TRANSCODER_B, + [PORT_D] = TRANSCODER_C, + [PORT_E] = TRANSCODER_A, }; WARN_ON(INTEL_GEN(dev_priv) < 9); @@ -3499,7 +3890,7 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private 
*dev_priv, if (WARN_ON(port < PORT_A || port > PORT_E)) port = PORT_A; - return regs[port]; + return CHICKEN_TRANS(trans[port]); } static void intel_enable_ddi_hdmi(struct intel_encoder *encoder, @@ -3633,7 +4024,7 @@ static void intel_ddi_update_pipe_dp(struct intel_encoder *encoder, { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - intel_ddi_set_pipe_settings(crtc_state); + intel_ddi_set_dp_msa(crtc_state, conn_state); intel_psr_update(intel_dp, crtc_state); intel_edp_drrs_enable(intel_dp, crtc_state); @@ -3761,7 +4152,7 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) u32 val; bool wait = false; - if (I915_READ(DP_TP_CTL(port)) & DP_TP_CTL_ENABLE) { + if (I915_READ(intel_dp->regs.dp_tp_ctl) & DP_TP_CTL_ENABLE) { val = I915_READ(DDI_BUF_CTL(port)); if (val & DDI_BUF_CTL_ENABLE) { val &= ~DDI_BUF_CTL_ENABLE; @@ -3769,11 +4160,11 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) wait = true; } - val = I915_READ(DP_TP_CTL(port)); + val = I915_READ(intel_dp->regs.dp_tp_ctl); val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK); val |= DP_TP_CTL_LINK_TRAIN_PAT1; - I915_WRITE(DP_TP_CTL(port), val); - POSTING_READ(DP_TP_CTL(port)); + I915_WRITE(intel_dp->regs.dp_tp_ctl, val); + POSTING_READ(intel_dp->regs.dp_tp_ctl); if (wait) intel_wait_ddi_buf_idle(dev_priv, port); @@ -3788,8 +4179,8 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp) if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) val |= DP_TP_CTL_ENHANCED_FRAME_ENABLE; } - I915_WRITE(DP_TP_CTL(port), val); - POSTING_READ(DP_TP_CTL(port)); + I915_WRITE(intel_dp->regs.dp_tp_ctl, val); + POSTING_READ(intel_dp->regs.dp_tp_ctl); intel_dp->DP |= DDI_BUF_CTL_ENABLE; I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP); @@ -3891,6 +4282,23 @@ void intel_ddi_get_config(struct intel_encoder *encoder, pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; intel_dp_get_m_n(intel_crtc, pipe_config); + + if (INTEL_GEN(dev_priv) >= 11) { + i915_reg_t dp_tp_ctl; + + if (IS_GEN(dev_priv, 11)) + dp_tp_ctl = DP_TP_CTL(encoder->port); + else + dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder); + + pipe_config->fec_enable = + I915_READ(dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE; + + DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n", + encoder->base.base.id, encoder->base.name, + pipe_config->fec_enable); + } + break; case TRANS_DDI_MODE_SELECT_DP_MST: pipe_config->output_types |= BIT(INTEL_OUTPUT_DP_MST); @@ -3902,6 +4310,9 @@ void intel_ddi_get_config(struct intel_encoder *encoder, break; } + if (encoder->type == INTEL_OUTPUT_EDP) + tgl_dc3co_exitline_get_config(pipe_config); + pipe_config->has_audio = intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder); @@ -3979,10 +4390,13 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder, if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) pipe_config->cpu_transcoder = TRANSCODER_EDP; - if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) + if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI)) { ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state); - else + } else { ret = intel_dp_compute_config(encoder, pipe_config, conn_state); + tgl_dc3co_exitline_compute_config(encoder, pipe_config); + } + if (ret) return ret; @@ -4276,7 +4690,6 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) struct intel_encoder *intel_encoder; struct drm_encoder *encoder; bool init_hdmi, init_dp, init_lspcon = false; - enum pipe pipe; enum phy phy = intel_port_to_phy(dev_priv, port); 
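tgl_dc3co_exitline_compute_config() earlier in this diff (and the tgl_dc3co_exitline_get_config() readout just above) derive the PSR2/DC3CO exit line from the bspec 200 us exit latency: convert 200 us into scanlines of the adjusted mode, add one, and exit that many lines before the end of vertical active. A standalone sketch of the arithmetic, assuming the usual usecs-to-scanlines rounding (crtc_clock in kHz); the struct and helper names are purely illustrative:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

struct mode { unsigned int crtc_clock_khz, crtc_htotal, crtc_vdisplay; };

/* scanlines covered by 'usecs' of scanout time */
static unsigned int usecs_to_scanlines(const struct mode *m, unsigned int usecs)
{
        return DIV_ROUND_UP(usecs * m->crtc_clock_khz, 1000 * m->crtc_htotal);
}

static unsigned int dc3co_exitline(const struct mode *m)
{
        /* DC3CO exit time is 200 us; early-exit scanlines = roundup(200 / line time) + 1 */
        unsigned int exit_scanlines = usecs_to_scanlines(m, 200) + 1;

        if (exit_scanlines > m->crtc_vdisplay)
                return 0;       /* mode too short, leave DC3CO disabled */

        return m->crtc_vdisplay - exit_scanlines;
}

int main(void)
{
        /* 1920x1080@60: 148.5 MHz pixel clock, htotal 2200 */
        struct mode m = { .crtc_clock_khz = 148500, .crtc_htotal = 2200, .crtc_vdisplay = 1080 };

        /* 200 us = 14 lines rounded up, +1 = 15; exit line = 1080 - 15 = 1065 */
        printf("exit line = %u\n", dc3co_exitline(&m));
        return 0;
}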
init_hdmi = port_info->supports_dvi || port_info->supports_hdmi; @@ -4328,8 +4741,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_encoder->power_domain = intel_port_to_power_domain(port); intel_encoder->port = port; intel_encoder->cloneable = 0; - for_each_pipe(dev_priv, pipe) - intel_encoder->crtc_mask |= BIT(pipe); + intel_encoder->pipe_mask = ~0; if (INTEL_GEN(dev_priv) >= 11) intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) & @@ -4351,46 +4763,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port) intel_encoder->update_complete = intel_ddi_update_complete; } - switch (port) { - case PORT_A: - intel_dig_port->ddi_io_power_domain = - POWER_DOMAIN_PORT_DDI_A_IO; - break; - case PORT_B: - intel_dig_port->ddi_io_power_domain = - POWER_DOMAIN_PORT_DDI_B_IO; - break; - case PORT_C: - intel_dig_port->ddi_io_power_domain = - POWER_DOMAIN_PORT_DDI_C_IO; - break; - case PORT_D: - intel_dig_port->ddi_io_power_domain = - POWER_DOMAIN_PORT_DDI_D_IO; - break; - case PORT_E: - intel_dig_port->ddi_io_power_domain = - POWER_DOMAIN_PORT_DDI_E_IO; - break; - case PORT_F: - intel_dig_port->ddi_io_power_domain = - POWER_DOMAIN_PORT_DDI_F_IO; - break; - case PORT_G: - intel_dig_port->ddi_io_power_domain = - POWER_DOMAIN_PORT_DDI_G_IO; - break; - case PORT_H: - intel_dig_port->ddi_io_power_domain = - POWER_DOMAIN_PORT_DDI_H_IO; - break; - case PORT_I: - intel_dig_port->ddi_io_power_domain = - POWER_DOMAIN_PORT_DDI_I_IO; - break; - default: - MISSING_CASE(port); - } + WARN_ON(port > PORT_I); + intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO + + port - PORT_A; if (init_dp) { if (!intel_ddi_init_dp_connector(intel_dig_port)) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h index a08365da2643..19aeab1246ee 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.h +++ b/drivers/gpu/drm/i915/display/intel_ddi.h @@ -30,7 +30,8 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state) void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state); void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state); void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state); -void intel_ddi_set_pipe_settings(const struct intel_crtc_state *crtc_state); +void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector); void intel_ddi_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index af50f05f4e9d..6f5e3bd13ad1 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -31,7 +31,6 @@ #include <linux/module.h> #include <linux/dma-resv.h> #include <linux/slab.h> -#include <linux/vgaarb.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -56,6 +55,8 @@ #include "display/intel_tv.h" #include "display/intel_vdsc.h" +#include "gt/intel_rps.h" + #include "i915_drv.h" #include "i915_trace.h" #include "intel_acpi.h" @@ -65,6 +66,7 @@ #include "intel_cdclk.h" #include "intel_color.h" #include "intel_display_types.h" +#include "intel_dp_link_training.h" #include "intel_fbc.h" #include "intel_fbdev.h" #include "intel_fifo_underrun.h" @@ -79,6 +81,7 @@ #include "intel_sideband.h" #include 
"intel_sprite.h" #include "intel_tc.h" +#include "intel_vga.h" /* Primary plane formats for gen <= 3 */ static const u32 i8xx_primary_formats[] = { @@ -88,7 +91,17 @@ static const u32 i8xx_primary_formats[] = { DRM_FORMAT_XRGB8888, }; -/* Primary plane formats for gen >= 4 */ +/* Primary plane formats for ivb (no fp16 due to hw issue) */ +static const u32 ivb_primary_formats[] = { + DRM_FORMAT_C8, + DRM_FORMAT_RGB565, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, +}; + +/* Primary plane formats for gen >= 4, except ivb */ static const u32 i965_primary_formats[] = { DRM_FORMAT_C8, DRM_FORMAT_RGB565, @@ -96,6 +109,7 @@ static const u32 i965_primary_formats[] = { DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XBGR16161616F, }; static const u64 i9xx_format_modifiers[] = { @@ -135,8 +149,6 @@ static void vlv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); static void chv_prepare_pll(struct intel_crtc *crtc, const struct intel_crtc_state *pipe_config); -static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *); -static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *); static void intel_crtc_init_scalers(struct intel_crtc *crtc, struct intel_crtc_state *crtc_state); static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state); @@ -490,7 +502,7 @@ static const struct intel_limit intel_limits_bxt = { /* WA Display #0827: Gen9:all */ static void -skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable) +skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) { if (enable) I915_WRITE(CLKGATE_DIS_PSL(pipe), @@ -521,6 +533,20 @@ needs_modeset(const struct intel_crtc_state *state) return drm_atomic_crtc_needs_modeset(&state->base); } +bool +is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) +{ + return (crtc_state->master_transcoder != INVALID_TRANSCODER || + crtc_state->sync_mode_slaves_mask); +} + +static bool +is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) +{ + return (crtc_state->master_transcoder == INVALID_TRANSCODER && + crtc_state->sync_mode_slaves_mask); +} + /* * Platform specific helpers to calculate the port PLL loopback- (clock.m), * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast @@ -1612,8 +1638,8 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, if (intel_de_wait_for_register(dev_priv, dpll_reg, port_mask, expected_mask, 1000)) - WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n", - port_name(dport->base.port), + WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n", + dport->base.base.base.id, dport->base.base.name, I915_READ(dpll_reg) & port_mask, expected_mask); } @@ -2079,7 +2105,8 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int pinctl; u32 alignment; - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + if (WARN_ON(!i915_gem_object_is_framebuffer(obj))) + return ERR_PTR(-EINVAL); alignment = intel_surf_alignment(fb, 0); @@ -2161,8 +2188,6 @@ err: void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) { - lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); - i915_gem_object_lock(vma->obj); if (flags & PLANE_HAS_FENCE) i915_vma_unpin_fence(vma); @@ -2739,10 +2764,7 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv, size++; /* rotate the x/y offsets to match the GTT view */ - r.x1 = x; - r.y1 = 
y; - r.x2 = x + width; - r.y2 = y + height; + drm_rect_init(&r, x, y, width, height); drm_rect_rotate(&r, rot_info->plane[i].width * tile_width, rot_info->plane[i].height * tile_height, @@ -2864,10 +2886,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state) struct drm_rect r; /* rotate the x/y offsets to match the GTT view */ - r.x1 = x; - r.y1 = y; - r.x2 = x + width; - r.y2 = y + height; + drm_rect_init(&r, x, y, width, height); drm_rect_rotate(&r, info->plane[i].width * tile_width, info->plane[i].height * tile_height, @@ -2969,6 +2988,8 @@ static int i9xx_format_to_fourcc(int format) return DRM_FORMAT_XRGB2101010; case DISPPLANE_RGBX101010: return DRM_FORMAT_XBGR2101010; + case DISPPLANE_RGBX161616: + return DRM_FORMAT_XBGR16161616F; } } @@ -3066,13 +3087,11 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc, return false; } - mutex_lock(&dev->struct_mutex); obj = i915_gem_object_create_stolen_for_preallocated(dev_priv, base_aligned, base_aligned, size_aligned); - mutex_unlock(&dev->struct_mutex); - if (!obj) + if (IS_ERR(obj)) return false; switch (plane_config->tiling) { @@ -3154,6 +3173,7 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc, intel_set_plane_visible(crtc_state, plane_state, false); fixup_active_planes(crtc_state); crtc_state->data_rate[plane->id] = 0; + crtc_state->min_cdclk[plane->id] = 0; if (plane->id == PLANE_PRIMARY) intel_pre_disable_primary_noatomic(&crtc->base); @@ -3233,13 +3253,11 @@ valid_fb: intel_state->color_plane[0].stride = intel_fb_pitch(fb, 0, intel_state->base.rotation); - mutex_lock(&dev->struct_mutex); intel_state->vma = intel_pin_and_fence_fb_obj(fb, &intel_state->view, intel_plane_uses_fence(intel_state), &intel_state->flags); - mutex_unlock(&dev->struct_mutex); if (IS_ERR(intel_state->vma)) { DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", intel_crtc->pipe, PTR_ERR(intel_state->vma)); @@ -3347,6 +3365,16 @@ static int icl_max_plane_width(const struct drm_framebuffer *fb, return 5120; } +static int skl_max_plane_height(void) +{ + return 4096; +} + +static int icl_max_plane_height(void) +{ + return 4320; +} + static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state, int main_x, int main_y, u32 main_offset) { @@ -3395,7 +3423,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) int w = drm_rect_width(&plane_state->base.src) >> 16; int h = drm_rect_height(&plane_state->base.src) >> 16; int max_width; - int max_height = 4096; + int max_height; u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset; if (INTEL_GEN(dev_priv) >= 11) @@ -3405,6 +3433,11 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) else max_width = skl_max_plane_width(fb, 0, rotation); + if (INTEL_GEN(dev_priv) >= 11) + max_height = icl_max_plane_height(); + else + max_height = skl_max_plane_height(); + if (w > max_width || h > max_height) { DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n", w, h, max_width, max_height); @@ -3471,9 +3504,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state) * Put the final coordinates back so that the src * coordinate checks will see the right values. 
*/ - drm_rect_translate(&plane_state->base.src, - (x << 16) - plane_state->base.src.x1, - (y << 16) - plane_state->base.src.y1); + drm_rect_translate_to(&plane_state->base.src, + x << 16, y << 16); return 0; } @@ -3544,7 +3576,7 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) * Handle the AUX surface first since * the main surface setup depends on it. */ - if (is_planar_yuv_format(fb->format->format)) { + if (drm_format_info_is_yuv_semiplanar(fb->format)) { ret = skl_check_nv12_aux_surface(plane_state); if (ret) return ret; @@ -3565,6 +3597,53 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state) return 0; } +static void i9xx_plane_ratio(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int cpp = fb->format->cpp[0]; + + /* + * g4x bspec says 64bpp pixel rate can't exceed 80% + * of cdclk when the sprite plane is enabled on the + * same pipe. ilk/snb bspec says 64bpp pixel rate is + * never allowed to exceed 80% of cdclk. Let's just go + * with the ilk/snb limit always. + */ + if (cpp == 8) { + *num = 10; + *den = 8; + } else { + *num = 1; + *den = 1; + } +} + +static int i9xx_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int pixel_rate; + unsigned int num, den; + + /* + * Note that crtc_state->pixel_rate accounts for both + * horizontal and vertical panel fitter downscaling factors. + * Pre-HSW bspec tells us to only consider the horizontal + * downscaling factor here. We ignore that and just consider + * both for simplicity. + */ + pixel_rate = crtc_state->pixel_rate; + + i9xx_plane_ratio(crtc_state, plane_state, &num, &den); + + /* two pixels per clock with double wide pipe */ + if (crtc_state->double_wide) + den *= 2; + + return DIV_ROUND_UP(pixel_rate * num, den); +} + unsigned int i9xx_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, @@ -3647,6 +3726,9 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_XBGR2101010: dspcntr |= DISPPLANE_RGBX101010; break; + case DRM_FORMAT_XBGR16161616F: + dspcntr |= DISPPLANE_RGBX161616; + break; default: MISSING_CASE(fb->format->format); return 0; @@ -3669,7 +3751,8 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); - int src_x, src_y; + const struct drm_framebuffer *fb = plane_state->base.fb; + int src_x, src_y, src_w; u32 offset; int ret; @@ -3680,9 +3763,14 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state) if (!plane_state->base.visible) return 0; + src_w = drm_rect_width(&plane_state->base.src) >> 16; src_x = plane_state->base.src.x1 >> 16; src_y = plane_state->base.src.y1 >> 16; + /* Undocumented hardware limit on i965/g4x/vlv/chv */ + if (HAS_GMCH(dev_priv) && fb->format->cpp[0] == 8 && src_w > 2048) + return -EINVAL; + intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); if (INTEL_GEN(dev_priv) >= 4) @@ -3695,9 +3783,8 @@ int i9xx_check_plane_surface(struct intel_plane_state *plane_state) * Put the final coordinates back so that the src * coordinate checks will see the right values. 
*/ - drm_rect_translate(&plane_state->base.src, - (src_x << 16) - plane_state->base.src.x1, - (src_y << 16) - plane_state->base.src.y1); + drm_rect_translate_to(&plane_state->base.src, + src_x << 16, src_y << 16); /* HSW/BDW do this automagically in hardware */ if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) { @@ -4227,7 +4314,7 @@ __intel_display_resume(struct drm_device *dev, int i, ret; intel_modeset_setup_hw_state(dev, ctx); - i915_redisable_vga(to_i915(dev)); + intel_vga_redisable(to_i915(dev)); if (!state) return 0; @@ -4259,7 +4346,7 @@ __intel_display_resume(struct drm_device *dev, static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv) { return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display && - intel_has_gpu_reset(dev_priv)); + intel_has_gpu_reset(&dev_priv->gt)); } void intel_prepare_reset(struct drm_i915_private *dev_priv) @@ -4346,7 +4433,7 @@ void intel_finish_reset(struct drm_i915_private *dev_priv) * so need a full re-initialization. */ intel_pps_unlock_regs_wa(dev_priv); - intel_modeset_init_hw(dev); + intel_modeset_init_hw(dev_priv); intel_init_clock_gating(dev_priv); spin_lock_irq(&dev_priv->irq_lock); @@ -4394,50 +4481,60 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc) I915_WRITE(PIPE_CHICKEN(pipe), tmp); } -static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state, - const struct intel_crtc_state *new_crtc_state) +static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state) { - struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - /* drm_atomic_helper_update_legacy_modeset_state might not be called. */ - crtc->base.mode = new_crtc_state->base.mode; + u32 trans_ddi_func_ctl2_val; + u8 master_select; /* - * Update pipe size and adjust fitter if needed: the reason for this is - * that in compute_mode_changes we check the native mode (not the pfit - * mode) to see if we can flip rather than do a full mode set. In the - * fastboot case, we'll flip, but if we don't update the pipesrc and - * pfit state, we'll end up with a big fb scanned out into the wrong - * sized surface. + * Configure the master select and enable Transcoder Port Sync for + * Slave CRTCs transcoder. 
*/ + if (crtc_state->master_transcoder == INVALID_TRANSCODER) + return; - I915_WRITE(PIPESRC(crtc->pipe), - ((new_crtc_state->pipe_src_w - 1) << 16) | - (new_crtc_state->pipe_src_h - 1)); + if (crtc_state->master_transcoder == TRANSCODER_EDP) + master_select = 0; + else + master_select = crtc_state->master_transcoder + 1; - /* on skylake this is done by detaching scalers */ - if (INTEL_GEN(dev_priv) >= 9) { - skl_detach_scalers(new_crtc_state); + /* Set the master select bits for Tranascoder Port Sync */ + trans_ddi_func_ctl2_val = (PORT_SYNC_MODE_MASTER_SELECT(master_select) & + PORT_SYNC_MODE_MASTER_SELECT_MASK) << + PORT_SYNC_MODE_MASTER_SELECT_SHIFT; + /* Enable Transcoder Port Sync */ + trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE; - if (new_crtc_state->pch_pfit.enabled) - skylake_pfit_enable(new_crtc_state); - } else if (HAS_PCH_SPLIT(dev_priv)) { - if (new_crtc_state->pch_pfit.enabled) - ironlake_pfit_enable(new_crtc_state); - else if (old_crtc_state->pch_pfit.enabled) - ironlake_pfit_disable(old_crtc_state); - } + I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder), + trans_ddi_func_ctl2_val); +} - if (INTEL_GEN(dev_priv) >= 11) - icl_set_pipe_chicken(crtc); +static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + i915_reg_t reg; + u32 trans_ddi_func_ctl2_val; + + if (old_crtc_state->master_transcoder == INVALID_TRANSCODER) + return; + + DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n", + transcoder_name(old_crtc_state->cpu_transcoder)); + + reg = TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder); + trans_ddi_func_ctl2_val = ~(PORT_SYNC_MODE_ENABLE | + PORT_SYNC_MODE_MASTER_SELECT_MASK); + I915_WRITE(reg, trans_ddi_func_ctl2_val); } static void intel_fdi_normal_train(struct intel_crtc *crtc) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp; @@ -4480,7 +4577,7 @@ static void ironlake_fdi_link_train(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp, tries; @@ -4581,7 +4678,7 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp, i, retry; @@ -4714,7 +4811,7 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; i915_reg_t reg; u32 temp, i, j; @@ -4832,7 +4929,7 @@ static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; i915_reg_t reg; u32 temp; @@ -4869,7 +4966,7 @@ static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; i915_reg_t reg; u32 temp; @@ -4900,7 +4997,7 @@ static 
void ironlake_fdi_disable(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; i915_reg_t reg; u32 temp; @@ -5215,7 +5312,7 @@ static void ironlake_pch_enable(const struct intel_atomic_state *state, struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; u32 temp; assert_pch_transcoder_disabled(dev_priv, pipe); @@ -5310,7 +5407,7 @@ static void lpt_pch_enable(const struct intel_atomic_state *state, lpt_enable_pch_transcoder(dev_priv, cpu_transcoder); } -static void cpt_verify_modeset(struct drm_device *dev, int pipe) +static void cpt_verify_modeset(struct drm_device *dev, enum pipe pipe) { struct drm_i915_private *dev_priv = to_i915(dev); i915_reg_t dslreg = PIPEDSL(pipe); @@ -5462,7 +5559,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, return 0; } - if (format && is_planar_yuv_format(format->format) && + if (format && drm_format_info_is_yuv_semiplanar(format) && (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) { DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n"); return -EINVAL; @@ -5539,7 +5636,7 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, /* Pre-gen11 and SDR planes always need a scaler for planar formats. */ if (!icl_is_hdr_plane(dev_priv, intel_plane->id) && - fb && is_planar_yuv_format(fb->format->format)) + fb && drm_format_info_is_yuv_semiplanar(fb->format)) need_scaler = true; ret = skl_update_scaler(crtc_state, force_detach, @@ -5571,10 +5668,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_ARGB8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: - case DRM_FORMAT_XBGR16161616F: - case DRM_FORMAT_ABGR16161616F: - case DRM_FORMAT_XRGB16161616F: - case DRM_FORMAT_ARGB16161616F: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -5590,6 +5683,13 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state, case DRM_FORMAT_XVYU12_16161616: case DRM_FORMAT_XVYU16161616: break; + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + if (INTEL_GEN(dev_priv) >= 11) + break; + /* fall through */ default: DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n", intel_plane->base.base.id, intel_plane->base.name, @@ -5649,7 +5749,7 @@ static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; if (crtc_state->pch_pfit.enabled) { /* Force use of hard-coded filter coefficients @@ -5731,13 +5831,8 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc) { - if (intel_crtc->overlay) { - struct drm_device *dev = intel_crtc->base.dev; - - mutex_lock(&dev->struct_mutex); + if (intel_crtc->overlay) (void) intel_overlay_switch_off(intel_crtc->overlay); - mutex_unlock(&dev->struct_mutex); - } /* Let userspace switch the overlay on again. In most cases userspace * has to recompute where to put it anyway. 
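The i9xx_plane_ratio()/i9xx_plane_min_cdclk() helpers added a few hunks up encode the bspec rule that a 64 bpp (fp16) primary plane may not push the pixel rate past 80% of cdclk, i.e. min cdclk = pixel_rate * 10 / 8, with the denominator doubled for double-wide pipes since those scan out two pixels per clock. A small standalone rendition of that arithmetic; the function and parameter names here are illustrative, not the driver's:

#include <stdio.h>

static unsigned int min_cdclk_khz(unsigned int pixel_rate_khz,
                                  unsigned int cpp, int double_wide)
{
        /* 64bpp pixel rate must stay below 80% of cdclk => cdclk >= rate * 10/8 */
        unsigned int num = (cpp == 8) ? 10 : 1;
        unsigned int den = (cpp == 8) ?  8 : 1;

        if (double_wide)        /* two pixels per clock */
                den *= 2;

        return (pixel_rate_khz * num + den - 1) / den;  /* DIV_ROUND_UP */
}

int main(void)
{
        /* 1920x1080@60: ~148.5 MHz pixel rate */
        printf("%u kHz\n", min_cdclk_khz(148500, 8, 0));        /* 185625: fp16 needs more cdclk */
        printf("%u kHz\n", min_cdclk_khz(148500, 4, 0));        /* 148500: 32bpp is 1:1 */
        return 0;
}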
@@ -5762,7 +5857,7 @@ intel_post_enable_primary(struct drm_crtc *crtc, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; /* * Gen2 reports pipe underruns whenever all planes are disabled. @@ -5786,7 +5881,7 @@ intel_pre_disable_primary_noatomic(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; /* * Gen2 reports pipe underruns whenever all planes are disabled. @@ -6309,7 +6404,7 @@ static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; if (WARN_ON(intel_crtc->active)) return; @@ -6442,7 +6537,7 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe, hsw_workaround_pipe; + enum pipe pipe = intel_crtc->pipe, hsw_workaround_pipe; enum transcoder cpu_transcoder = pipe_config->cpu_transcoder; bool psl_clkgate_wa; @@ -6462,6 +6557,9 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, if (!transcoder_is_dsi(cpu_transcoder)) intel_set_pipe_timings(pipe_config); + if (INTEL_GEN(dev_priv) >= 11) + icl_enable_trans_port_sync(pipe_config); + intel_set_pipe_src_size(pipe_config); if (cpu_transcoder != TRANSCODER_EDP && @@ -6507,7 +6605,6 @@ static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, if (INTEL_GEN(dev_priv) >= 11) icl_set_pipe_chicken(intel_crtc); - intel_ddi_set_pipe_settings(pipe_config); if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_enable_transcoder_func(pipe_config); @@ -6568,7 +6665,7 @@ static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; /* * Sometimes spurious CPU pipe underruns happen when the @@ -6640,6 +6737,9 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state, if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST)) intel_ddi_set_vc_payload_alloc(old_crtc_state, false); + if (INTEL_GEN(dev_priv) >= 11) + icl_disable_transcoder_port_sync(old_crtc_state); + if (!transcoder_is_dsi(cpu_transcoder)) intel_ddi_disable_transcoder_func(old_crtc_state); @@ -6737,6 +6837,8 @@ enum intel_display_power_domain intel_port_to_power_domain(enum port port) return POWER_DOMAIN_PORT_DDI_E_LANES; case PORT_F: return POWER_DOMAIN_PORT_DDI_F_LANES; + case PORT_G: + return POWER_DOMAIN_PORT_DDI_G_LANES; default: MISSING_CASE(port); return POWER_DOMAIN_PORT_OTHER; @@ -6753,16 +6855,18 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) dig_port->tc_mode == TC_PORT_TBT_ALT) { switch (dig_port->aux_ch) { case AUX_CH_C: - return POWER_DOMAIN_AUX_TBT1; + return POWER_DOMAIN_AUX_C_TBT; case AUX_CH_D: - return POWER_DOMAIN_AUX_TBT2; + return POWER_DOMAIN_AUX_D_TBT; case AUX_CH_E: - return POWER_DOMAIN_AUX_TBT3; + return POWER_DOMAIN_AUX_E_TBT; case 
AUX_CH_F: - return POWER_DOMAIN_AUX_TBT4; + return POWER_DOMAIN_AUX_F_TBT; + case AUX_CH_G: + return POWER_DOMAIN_AUX_G_TBT; default: MISSING_CASE(dig_port->aux_ch); - return POWER_DOMAIN_AUX_TBT1; + return POWER_DOMAIN_AUX_C_TBT; } } @@ -6779,6 +6883,8 @@ intel_aux_power_domain(struct intel_digital_port *dig_port) return POWER_DOMAIN_AUX_E; case AUX_CH_F: return POWER_DOMAIN_AUX_F; + case AUX_CH_G: + return POWER_DOMAIN_AUX_G; default: MISSING_CASE(dig_port->aux_ch); return POWER_DOMAIN_AUX_A; @@ -6855,7 +6961,7 @@ static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; if (WARN_ON(intel_crtc->active)) return; @@ -6987,7 +7093,7 @@ static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state, struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = to_i915(dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; /* * On gen2 planes are double buffered but the pipe isn't, so we must @@ -7096,7 +7202,7 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc, intel_display_power_put_unchecked(dev_priv, domain); intel_crtc->enabled_power_domains = 0; - dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe); + dev_priv->active_pipes &= ~BIT(intel_crtc->pipe); dev_priv->min_cdclk[intel_crtc->pipe] = 0; dev_priv->min_voltage_level[intel_crtc->pipe] = 0; @@ -7204,7 +7310,7 @@ static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, } } - if (INTEL_INFO(dev_priv)->num_pipes == 2) + if (INTEL_NUM_PIPES(dev_priv) == 2) return 0; /* Ivybridge 3 pipe is really complicated */ @@ -7542,6 +7648,27 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, constant_n); } +static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) +{ + /* + * There may be no VBT; and if the BIOS enabled SSC we can + * just keep using it to avoid unnecessary flicker. Whereas if the + * BIOS isn't using it, don't assume it will work even if the VBT + * indicates as much. 
+ */ + if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { + bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) & + DREF_SSC1_ENABLE; + + if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { + DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n", + enableddisabled(bios_lvds_use_ssc), + enableddisabled(dev_priv->vbt.lvds_use_ssc)); + dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; + } + } +} + static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) { if (i915_modparams.panel_use_ssc >= 0) @@ -8193,6 +8320,21 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) (crtc_state->pipe_src_h - 1)); } +static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (IS_GEN(dev_priv, 2)) + return false; + + if (INTEL_GEN(dev_priv) >= 9 || + IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW; + else + return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK; +} + static void intel_get_pipe_timings(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { @@ -8231,7 +8373,7 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc, pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1; pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1; - if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) { + if (intel_pipe_is_interlaced(pipe_config)) { pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE; pipe_config->base.adjusted_mode.crtc_vtotal += 1; pipe_config->base.adjusted_mode.crtc_vblank_end += 1; @@ -8563,7 +8705,7 @@ static void vlv_crtc_clock_get(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = pipe_config->cpu_transcoder; + enum pipe pipe = crtc->pipe; struct dpll clock; u32 mdiv; int refclk = 100000; @@ -8673,7 +8815,7 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = pipe_config->cpu_transcoder; + enum pipe pipe = crtc->pipe; enum dpio_channel port = vlv_pipe_to_channel(pipe); struct dpll clock; u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3; @@ -8702,47 +8844,24 @@ static void chv_crtc_clock_get(struct intel_crtc *crtc, pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock); } -static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) +static enum intel_output_format +bdw_get_pipemisc_output_format(struct intel_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB; - - pipe_config->lspcon_downsampling = false; + u32 tmp; - if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { - u32 tmp = I915_READ(PIPEMISC(crtc->pipe)); + tmp = I915_READ(PIPEMISC(crtc->pipe)); - if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { - bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE; - bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND; + if (tmp & PIPEMISC_YUV420_ENABLE) { + /* We support 4:2:0 in full blend mode only */ + WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0); - if (ycbcr420_enabled) { - /* We support 4:2:0 in full blend mode only */ - if (!blend) - output = INTEL_OUTPUT_FORMAT_INVALID; - 
else if (!(IS_GEMINILAKE(dev_priv) || - INTEL_GEN(dev_priv) >= 10)) - output = INTEL_OUTPUT_FORMAT_INVALID; - else - output = INTEL_OUTPUT_FORMAT_YCBCR420; - } else { - /* - * Currently there is no interface defined to - * check user preference between RGB/YCBCR444 - * or YCBCR420. So the only possible case for - * YCBCR444 usage is driving YCBCR420 output - * with LSPCON, when pipe is configured for - * YCBCR444 output and LSPCON takes care of - * downsampling it. - */ - pipe_config->lspcon_downsampling = true; - output = INTEL_OUTPUT_FORMAT_YCBCR444; - } - } + return INTEL_OUTPUT_FORMAT_YCBCR420; + } else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) { + return INTEL_OUTPUT_FORMAT_YCBCR444; + } else { + return INTEL_OUTPUT_FORMAT_RGB; } - - pipe_config->output_format = output; } static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) @@ -8780,6 +8899,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = NULL; + pipe_config->master_transcoder = INVALID_TRANSCODER; ret = false; @@ -9419,9 +9539,19 @@ static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state) else val |= PIPECONF_PROGRESSIVE; + /* + * This would end up with an odd purple hue over + * the entire display. Make sure we don't do it. + */ + WARN_ON(crtc_state->limited_color_range && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); + if (crtc_state->limited_color_range) val |= PIPECONF_COLOR_RANGE_SELECT; + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) + val |= PIPECONF_OUTPUT_COLORSPACE_YUV709; + val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode); I915_WRITE(PIPECONF(pipe), val); @@ -9443,6 +9573,10 @@ static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state) else val |= PIPECONF_PROGRESSIVE; + if (IS_HASWELL(dev_priv) && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) + val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW; + I915_WRITE(PIPECONF(cpu_transcoder), val); POSTING_READ(PIPECONF(cpu_transcoder)); } @@ -9593,7 +9727,7 @@ static void ironlake_compute_dpll(struct intel_crtc *crtc, * clear if it's a win or loss power wise. No point in doing * this on ILK at all since it has a fixed DPLL<->pipe mapping. 
*/ - if (INTEL_INFO(dev_priv)->num_pipes == 3 && + if (INTEL_NUM_PIPES(dev_priv) == 3 && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) dpll |= DPLL_SDVO_HIGH_SPEED; @@ -9892,8 +10026,8 @@ skylake_get_initial_plane_config(struct intel_crtc *crtc, offset = I915_READ(PLANE_OFFSET(pipe, plane_id)); val = I915_READ(PLANE_SIZE(pipe, plane_id)); - fb->height = ((val >> 16) & 0xfff) + 1; - fb->width = ((val >> 0) & 0x1fff) + 1; + fb->height = ((val >> 16) & 0xffff) + 1; + fb->width = ((val >> 0) & 0xffff) + 1; val = I915_READ(PLANE_STRIDE(pipe, plane_id)); stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); @@ -9954,9 +10088,9 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, if (!wakeref) return false; - pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; pipe_config->shared_dpll = NULL; + pipe_config->master_transcoder = INVALID_TRANSCODER; ret = false; tmp = I915_READ(PIPECONF(crtc->pipe)); @@ -9983,6 +10117,16 @@ static bool ironlake_get_pipe_config(struct intel_crtc *crtc, if (tmp & PIPECONF_COLOR_RANGE_SELECT) pipe_config->limited_color_range = true; + switch (tmp & PIPECONF_OUTPUT_COLORSPACE_MASK) { + case PIPECONF_OUTPUT_COLORSPACE_YUV601: + case PIPECONF_OUTPUT_COLORSPACE_YUV709: + pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; + break; + default: + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + break; + } + pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> PIPECONF_GAMMA_MODE_SHIFT; @@ -10397,6 +10541,59 @@ static void haswell_get_ddi_port_state(struct intel_crtc *crtc, } } +static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder) +{ + u32 trans_port_sync, master_select; + + trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder)); + + if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0) + return INVALID_TRANSCODER; + + master_select = trans_port_sync & + PORT_SYNC_MODE_MASTER_SELECT_MASK; + if (master_select == 0) + return TRANSCODER_EDP; + else + return master_select - 1; +} + +static void icelake_get_trans_port_sync_config(struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + u32 transcoders; + enum transcoder cpu_transcoder; + + crtc_state->master_transcoder = transcoder_master_readout(dev_priv, + crtc_state->cpu_transcoder); + + transcoders = BIT(TRANSCODER_A) | + BIT(TRANSCODER_B) | + BIT(TRANSCODER_C) | + BIT(TRANSCODER_D); + for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { + enum intel_display_power_domain power_domain; + intel_wakeref_t trans_wakeref; + + power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder); + trans_wakeref = intel_display_power_get_if_enabled(dev_priv, + power_domain); + + if (!trans_wakeref) + continue; + + if (transcoder_master_readout(dev_priv, cpu_transcoder) == + crtc_state->cpu_transcoder) + crtc_state->sync_mode_slaves_mask |= BIT(cpu_transcoder); + + intel_display_power_put(dev_priv, power_domain, trans_wakeref); + } + + WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER && + crtc_state->sync_mode_slaves_mask); +} + static bool haswell_get_pipe_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { @@ -10408,6 +10605,8 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, intel_crtc_init_scalers(crtc, pipe_config); + pipe_config->master_transcoder = INVALID_TRANSCODER; + power_domain = POWER_DOMAIN_PIPE(crtc->pipe); wf = 
intel_display_power_get_if_enabled(dev_priv, power_domain); if (!wf) @@ -10438,7 +10637,30 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, } intel_get_pipe_src_size(crtc, pipe_config); - intel_get_crtc_ycbcr_config(crtc, pipe_config); + + if (IS_HASWELL(dev_priv)) { + u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder)); + + if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW) + pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444; + else + pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + } else { + pipe_config->output_format = + bdw_get_pipemisc_output_format(crtc); + + /* + * Currently there is no interface defined to + * check user preference between RGB/YCBCR444 + * or YCBCR420. So the only possible case for + * YCBCR444 usage is driving YCBCR420 output + * with LSPCON, when pipe is configured for + * YCBCR444 output and LSPCON takes care of + * downsampling it. + */ + pipe_config->lspcon_downsampling = + pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444; + } pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe)); @@ -10493,6 +10715,10 @@ static bool haswell_get_pipe_config(struct intel_crtc *crtc, pipe_config->pixel_multiplier = 1; } + if (INTEL_GEN(dev_priv) >= 11 && + !transcoder_is_dsi(pipe_config->cpu_transcoder)) + icelake_get_trans_port_sync_config(pipe_config); + out: for_each_power_domain(power_domain, power_domain_mask) intel_display_power_put(dev_priv, @@ -10514,21 +10740,13 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state) else base = intel_plane_ggtt_offset(plane_state); - base += plane_state->color_plane[0].offset; - - /* ILK+ do this automagically */ - if (HAS_GMCH(dev_priv) && - plane_state->base.rotation & DRM_MODE_ROTATE_180) - base += (plane_state->base.crtc_h * - plane_state->base.crtc_w - 1) * fb->format->cpp[0]; - - return base; + return base + plane_state->color_plane[0].offset; } static u32 intel_cursor_position(const struct intel_plane_state *plane_state) { - int x = plane_state->base.crtc_x; - int y = plane_state->base.crtc_y; + int x = plane_state->base.dst.x1; + int y = plane_state->base.dst.y1; u32 pos = 0; if (x < 0) { @@ -10550,8 +10768,8 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) { const struct drm_mode_config *config = &plane_state->base.plane->dev->mode_config; - int width = plane_state->base.crtc_w; - int height = plane_state->base.crtc_h; + int width = drm_rect_width(&plane_state->base.dst); + int height = drm_rect_height(&plane_state->base.dst); return width > 0 && width <= config->cursor_width && height > 0 && height <= config->cursor_height; @@ -10559,6 +10777,9 @@ static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state) static int intel_cursor_check_surface(struct intel_plane_state *plane_state) { + struct drm_i915_private *dev_priv = + to_i915(plane_state->base.plane->dev); + unsigned int rotation = plane_state->base.rotation; int src_x, src_y; u32 offset; int ret; @@ -10570,8 +10791,8 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state) if (!plane_state->base.visible) return 0; - src_x = plane_state->base.src_x >> 16; - src_y = plane_state->base.src_y >> 16; + src_x = plane_state->base.src.x1 >> 16; + src_y = plane_state->base.src.y1 >> 16; intel_add_fb_offsets(&src_x, &src_y, plane_state, 0); offset = intel_plane_compute_aligned_offset(&src_x, &src_y, @@ -10582,7 +10803,25 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state) return -EINVAL; } + /* + * Put the final 
coordinates back so that the src + * coordinate checks will see the right values. + */ + drm_rect_translate_to(&plane_state->base.src, + src_x << 16, src_y << 16); + + /* ILK+ do this automagically in hardware */ + if (HAS_GMCH(dev_priv) && rotation & DRM_MODE_ROTATE_180) { + const struct drm_framebuffer *fb = plane_state->base.fb; + int src_w = drm_rect_width(&plane_state->base.src) >> 16; + int src_h = drm_rect_height(&plane_state->base.src) >> 16; + + offset += (src_h * src_w - 1) * fb->format->cpp[0]; + } + plane_state->color_plane[0].offset = offset; + plane_state->color_plane[0].x = src_x; + plane_state->color_plane[0].y = src_y; return 0; } @@ -10606,6 +10845,10 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state, if (ret) return ret; + /* Use the unclipped src/dst rectangles, which we program to hw */ + plane_state->base.src = drm_plane_state_src(&plane_state->base); + plane_state->base.dst = drm_plane_state_dest(&plane_state->base); + ret = intel_cursor_check_surface(plane_state); if (ret) return ret; @@ -10648,7 +10891,7 @@ static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state) { - int width = plane_state->base.crtc_w; + int width = drm_rect_width(&plane_state->base.dst); /* * 845g/865g are only limited by the width of their cursors, @@ -10674,8 +10917,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, /* Check for which cursor types we support */ if (!i845_cursor_size_ok(plane_state)) { DRM_DEBUG("Cursor dimension %dx%d not supported\n", - plane_state->base.crtc_w, - plane_state->base.crtc_h); + drm_rect_width(&plane_state->base.dst), + drm_rect_height(&plane_state->base.dst)); return -EINVAL; } @@ -10708,8 +10951,8 @@ static void i845_update_cursor(struct intel_plane *plane, unsigned long irqflags; if (plane_state && plane_state->base.visible) { - unsigned int width = plane_state->base.crtc_w; - unsigned int height = plane_state->base.crtc_h; + unsigned int width = drm_rect_width(&plane_state->base.dst); + unsigned int height = drm_rect_height(&plane_state->base.dst); cntl = plane_state->ctl | i845_cursor_ctl_crtc(crtc_state); @@ -10811,7 +11054,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) cntl |= MCURSOR_TRICKLE_FEED_DISABLE; - switch (plane_state->base.crtc_w) { + switch (drm_rect_width(&plane_state->base.dst)) { case 64: cntl |= MCURSOR_MODE_64_ARGB_AX; break; @@ -10822,7 +11065,7 @@ static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, cntl |= MCURSOR_MODE_256_ARGB_AX; break; default: - MISSING_CASE(plane_state->base.crtc_w); + MISSING_CASE(drm_rect_width(&plane_state->base.dst)); return 0; } @@ -10836,8 +11079,8 @@ static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); - int width = plane_state->base.crtc_w; - int height = plane_state->base.crtc_h; + int width = drm_rect_width(&plane_state->base.dst); + int height = drm_rect_height(&plane_state->base.dst); if (!intel_cursor_size_ok(plane_state)) return false; @@ -10890,17 +11133,19 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, /* Check for which cursor types we support */ if (!i9xx_cursor_size_ok(plane_state)) { DRM_DEBUG("Cursor dimension %dx%d not supported\n", - plane_state->base.crtc_w, - plane_state->base.crtc_h); + drm_rect_width(&plane_state->base.dst), + 
drm_rect_height(&plane_state->base.dst)); return -EINVAL; } WARN_ON(plane_state->base.visible && plane_state->color_plane[0].stride != fb->pitches[0]); - if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) { + if (fb->pitches[0] != + drm_rect_width(&plane_state->base.dst) * fb->format->cpp[0]) { DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n", - fb->pitches[0], plane_state->base.crtc_w); + fb->pitches[0], + drm_rect_width(&plane_state->base.dst)); return -EINVAL; } @@ -10915,7 +11160,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, * Refuse the put the cursor into that compromised position. */ if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C && - plane_state->base.visible && plane_state->base.crtc_x < 0) { + plane_state->base.visible && plane_state->base.dst.x1 < 0) { DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n"); return -EINVAL; } @@ -10935,11 +11180,14 @@ static void i9xx_update_cursor(struct intel_plane *plane, unsigned long irqflags; if (plane_state && plane_state->base.visible) { + unsigned width = drm_rect_width(&plane_state->base.dst); + unsigned height = drm_rect_height(&plane_state->base.dst); + cntl = plane_state->ctl | i9xx_cursor_ctl_crtc(crtc_state); - if (plane_state->base.crtc_h != plane_state->base.crtc_w) - fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1); + if (width != height) + fbc_ctl = CUR_FBC_CTL_EN | (height - 1); base = intel_cursor_base(plane_state); pos = intel_cursor_position(plane_state); @@ -11084,7 +11332,6 @@ static int intel_modeset_disable_planes(struct drm_atomic_state *state, } int intel_get_load_detect_pipe(struct drm_connector *connector, - const struct drm_display_mode *mode, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx) { @@ -11191,10 +11438,8 @@ found: crtc_state->base.active = crtc_state->base.enable = true; - if (!mode) - mode = &load_detect_mode; - - ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode); + ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, + &load_detect_mode); if (ret) goto fail; @@ -11286,7 +11531,7 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc, { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); - int pipe = pipe_config->cpu_transcoder; + enum pipe pipe = crtc->pipe; u32 dpll = pipe_config->dpll_hw_state.dpll; u32 fp; struct dpll clock; @@ -11510,7 +11755,6 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat bool was_crtc_enabled = old_crtc_state->base.active; bool is_crtc_enabled = crtc_state->base.active; bool turn_off, turn_on, visible, was_visible; - struct drm_framebuffer *fb = plane_state->base.fb; int ret; if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) { @@ -11539,24 +11783,18 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat plane_state->base.visible = visible = false; crtc_state->active_planes &= ~BIT(plane->id); crtc_state->data_rate[plane->id] = 0; + crtc_state->min_cdclk[plane->id] = 0; } if (!was_visible && !visible) return 0; - if (fb != old_plane_state->base.fb) - crtc_state->fb_changed = true; - turn_off = was_visible && (!visible || mode_changed); turn_on = visible && (!was_visible || mode_changed); - DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n", + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", crtc->base.base.id, crtc->base.name, plane->base.base.id, plane->base.name, - fb ? 
fb->base.id : -1); - - DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n", - plane->base.base.id, plane->base.name, was_visible, visible, turn_off, turn_on, mode_changed); @@ -11665,7 +11903,7 @@ static int icl_add_linked_planes(struct intel_atomic_state *state) int i; for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - linked = plane_state->linked_plane; + linked = plane_state->planar_linked_plane; if (!linked) continue; @@ -11674,8 +11912,8 @@ static int icl_add_linked_planes(struct intel_atomic_state *state) if (IS_ERR(linked_plane_state)) return PTR_ERR(linked_plane_state); - WARN_ON(linked_plane_state->linked_plane != plane); - WARN_ON(linked_plane_state->slave == plane_state->slave); + WARN_ON(linked_plane_state->planar_linked_plane != plane); + WARN_ON(linked_plane_state->planar_slave == plane_state->planar_slave); } return 0; @@ -11698,16 +11936,16 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) * in the crtc_state->active_planes mask. */ for_each_new_intel_plane_in_state(state, plane, plane_state, i) { - if (plane->pipe != crtc->pipe || !plane_state->linked_plane) + if (plane->pipe != crtc->pipe || !plane_state->planar_linked_plane) continue; - plane_state->linked_plane = NULL; - if (plane_state->slave && !plane_state->base.visible) { + plane_state->planar_linked_plane = NULL; + if (plane_state->planar_slave && !plane_state->base.visible) { crtc_state->active_planes &= ~BIT(plane->id); crtc_state->update_planes |= BIT(plane->id); } - plane_state->slave = false; + plane_state->planar_slave = false; } if (!crtc_state->nv12_planes) @@ -11741,10 +11979,10 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state) return -EINVAL; } - plane_state->linked_plane = linked; + plane_state->planar_linked_plane = linked; - linked_state->slave = true; - linked_state->linked_plane = plane; + linked_state->planar_slave = true; + linked_state->planar_linked_plane = plane; crtc_state->active_planes |= BIT(linked->id); crtc_state->update_planes |= BIT(linked->id); DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name); @@ -11764,25 +12002,108 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state) return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes; } -static int intel_crtc_atomic_check(struct drm_crtc *crtc, - struct drm_crtc_state *crtc_state) +static int icl_add_sync_mode_crtcs(struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_crtc_state *pipe_config = - to_intel_crtc_state(crtc_state); + struct drm_crtc *crtc = crtc_state->base.crtc; + struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); + struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); + struct drm_connector *master_connector, *connector; + struct drm_connector_state *connector_state; + struct drm_connector_list_iter conn_iter; + struct drm_crtc *master_crtc = NULL; + struct drm_crtc_state *master_crtc_state; + struct intel_crtc_state *master_pipe_config; + int i, tile_group_id; + + if (INTEL_GEN(dev_priv) < 11) + return 0; + + /* + * In case of tiled displays there could be one or more slaves but there is + * only one master. Let's make the CRTC used by the connector corresponding + * to the last horizontal and last vertical tile a master/genlock CRTC. 
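A minimal sketch of the master/genlock rule stated just above (illustrative only, not part of the patch; it assumes only the drm_connector tile fields already used in this hunk: has_tile, tile_h_loc, tile_v_loc, num_h_tile, num_v_tile):

static bool connector_is_tile_master(const struct drm_connector *connector)
{
	/* Only tiled connectors take part in master/genlock selection. */
	if (!connector->has_tile)
		return false;

	/*
	 * The connector at the last horizontal and last vertical tile
	 * position owns the master CRTC; the remaining tiles of the same
	 * tile group become slaves.
	 */
	return connector->tile_h_loc == connector->num_h_tile - 1 &&
	       connector->tile_v_loc == connector->num_v_tile - 1;
}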
+ * All the other CRTCs corresponding to other tiles of the same Tile group + * are the slave CRTCs and hold a pointer to their genlock CRTC. + */ + for_each_new_connector_in_state(&state->base, connector, connector_state, i) { + if (connector_state->crtc != crtc) + continue; + if (!connector->has_tile) + continue; + if (crtc_state->base.mode.hdisplay != connector->tile_h_size || + crtc_state->base.mode.vdisplay != connector->tile_v_size) + return 0; + if (connector->tile_h_loc == connector->num_h_tile - 1 && + connector->tile_v_loc == connector->num_v_tile - 1) + continue; + crtc_state->sync_mode_slaves_mask = 0; + tile_group_id = connector->tile_group->id; + drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter); + drm_for_each_connector_iter(master_connector, &conn_iter) { + struct drm_connector_state *master_conn_state = NULL; + + if (!master_connector->has_tile) + continue; + if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 || + master_connector->tile_v_loc != master_connector->num_v_tile - 1) + continue; + if (master_connector->tile_group->id != tile_group_id) + continue; + + master_conn_state = drm_atomic_get_connector_state(&state->base, + master_connector); + if (IS_ERR(master_conn_state)) { + drm_connector_list_iter_end(&conn_iter); + return PTR_ERR(master_conn_state); + } + if (master_conn_state->crtc) { + master_crtc = master_conn_state->crtc; + break; + } + } + drm_connector_list_iter_end(&conn_iter); + + if (!master_crtc) { + DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n", + connector_state->crtc->base.id); + return -EINVAL; + } + + master_crtc_state = drm_atomic_get_crtc_state(&state->base, + master_crtc); + if (IS_ERR(master_crtc_state)) + return PTR_ERR(master_crtc_state); + + master_pipe_config = to_intel_crtc_state(master_crtc_state); + crtc_state->master_transcoder = master_pipe_config->cpu_transcoder; + master_pipe_config->sync_mode_slaves_mask |= + BIT(crtc_state->cpu_transcoder); + DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n", + transcoder_name(crtc_state->master_transcoder), + crtc_state->base.crtc->base.id, + master_pipe_config->sync_mode_slaves_mask); + } + + return 0; +} + +static int intel_crtc_atomic_check(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + bool mode_changed = needs_modeset(crtc_state); int ret; - bool mode_changed = needs_modeset(pipe_config); if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) && - mode_changed && !crtc_state->active) - pipe_config->update_wm_post = true; + mode_changed && !crtc_state->base.active) + crtc_state->update_wm_post = true; - if (mode_changed && crtc_state->enable && + if (mode_changed && crtc_state->base.enable && dev_priv->display.crtc_compute_clock && - !WARN_ON(pipe_config->shared_dpll)) { - ret = dev_priv->display.crtc_compute_clock(intel_crtc, - pipe_config); + !WARN_ON(crtc_state->shared_dpll)) { + ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state); if (ret) return ret; } @@ -11791,19 +12112,19 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, * May need to update pipe gamma enable bits * when C8 planes are getting enabled/disabled. 
*/ - if (c8_planes_changed(pipe_config)) - crtc_state->color_mgmt_changed = true; + if (c8_planes_changed(crtc_state)) + crtc_state->base.color_mgmt_changed = true; - if (mode_changed || pipe_config->update_pipe || - crtc_state->color_mgmt_changed) { - ret = intel_color_check(pipe_config); + if (mode_changed || crtc_state->update_pipe || + crtc_state->base.color_mgmt_changed) { + ret = intel_color_check(crtc_state); if (ret) return ret; } ret = 0; if (dev_priv->display.compute_pipe_wm) { - ret = dev_priv->display.compute_pipe_wm(pipe_config); + ret = dev_priv->display.compute_pipe_wm(crtc_state); if (ret) { DRM_DEBUG_KMS("Target pipe watermarks are invalid\n"); return ret; @@ -11819,7 +12140,7 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, * old state and the new state. We can program these * immediately. */ - ret = dev_priv->display.compute_intermediate_wm(pipe_config); + ret = dev_priv->display.compute_intermediate_wm(crtc_state); if (ret) { DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n"); return ret; @@ -11827,29 +12148,19 @@ static int intel_crtc_atomic_check(struct drm_crtc *crtc, } if (INTEL_GEN(dev_priv) >= 9) { - if (mode_changed || pipe_config->update_pipe) - ret = skl_update_scaler_crtc(pipe_config); - - if (!ret) - ret = icl_check_nv12_planes(pipe_config); + if (mode_changed || crtc_state->update_pipe) + ret = skl_update_scaler_crtc(crtc_state); if (!ret) - ret = skl_check_pipe_max_pixel_rate(intel_crtc, - pipe_config); - if (!ret) - ret = intel_atomic_setup_scalers(dev_priv, intel_crtc, - pipe_config); + ret = intel_atomic_setup_scalers(dev_priv, crtc, + crtc_state); } if (HAS_IPS(dev_priv)) - pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config); + crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state); return ret; } -static const struct drm_crtc_helper_funcs intel_helper_funcs = { - .atomic_check = intel_crtc_atomic_check, -}; - static void intel_modeset_update_connector_atomic_state(struct drm_device *dev) { struct intel_connector *connector; @@ -12159,6 +12470,15 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state); + if (IS_CHERRYVIEW(dev_priv)) + DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", + pipe_config->cgm_mode, pipe_config->gamma_mode, + pipe_config->gamma_enable, pipe_config->csc_enable); + else + DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n", + pipe_config->csc_mode, pipe_config->gamma_mode, + pipe_config->gamma_enable, pipe_config->csc_enable); + dump_planes: if (!state) return; @@ -12179,6 +12499,12 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state) bool ret = true; /* + * We're going to peek into connector->state, + * hence connection_mutex must be held. + */ + drm_modeset_lock_assert_held(&dev->mode_config.connection_mutex); + + /* * Walk the connector list instead of the encoder * list to detect the problem on ddi platforms * where there's just one encoder per digital port. @@ -12260,6 +12586,13 @@ clear_intel_crtc_state(struct intel_crtc_state *crtc_state) if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) saved_state->wm = crtc_state->wm; + /* + * Save the slave bitmask which gets filled for master crtc state during + * slave atomic check call. 
+ */ + if (is_trans_port_sync_master(crtc_state)) + saved_state->sync_mode_slaves_mask = + crtc_state->sync_mode_slaves_mask; /* Keep base drm_crtc_state intact, only clear our extended struct */ BUILD_BUG_ON(offsetof(struct intel_crtc_state, base)); @@ -12353,6 +12686,15 @@ encoder_retry: drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode, CRTC_STEREO_DOUBLE); + /* Set the crtc_state defaults for trans_port_sync */ + pipe_config->master_transcoder = INVALID_TRANSCODER; + ret = icl_add_sync_mode_crtcs(pipe_config); + if (ret) { + DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n", + ret); + return ret; + } + /* Pass our mode to the connectors and the CRTC to give them a chance to * adjust it according to limitations or connector properties, and also * a chance to reject the mode entirely. @@ -12485,22 +12827,23 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv, if ((drm_debug & DRM_UT_KMS) == 0) return; - drm_dbg(DRM_UT_KMS, "fastset mismatch in %s infoframe", name); - drm_dbg(DRM_UT_KMS, "expected:"); + DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name); + DRM_DEBUG_KMS("expected:\n"); hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a); - drm_dbg(DRM_UT_KMS, "found"); + DRM_DEBUG_KMS("found:\n"); hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b); } else { - drm_err("mismatch in %s infoframe", name); - drm_err("expected:"); + DRM_ERROR("mismatch in %s infoframe\n", name); + DRM_ERROR("expected:\n"); hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a); - drm_err("found"); + DRM_ERROR("found:\n"); hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b); } } -static void __printf(3, 4) -pipe_config_mismatch(bool fastset, const char *name, const char *format, ...) +static void __printf(4, 5) +pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc, + const char *name, const char *format, ...) { struct va_format vaf; va_list args; @@ -12510,9 +12853,11 @@ pipe_config_mismatch(bool fastset, const char *name, const char *format, ...) 
vaf.va = &args; if (fastset) - drm_dbg(DRM_UT_KMS, "fastset mismatch in %s %pV", name, &vaf); + DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n", + crtc->base.base.id, crtc->base.name, name, &vaf); else - drm_err("mismatch in %s %pV", name, &vaf); + DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n", + crtc->base.base.id, crtc->base.name, name, &vaf); va_end(args); } @@ -12540,7 +12885,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, bool fastset) { struct drm_i915_private *dev_priv = to_i915(current_config->base.crtc->dev); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); bool ret = true; + u32 bp_gamma = 0; bool fixup_inherited = fastset && (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) && !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED); @@ -12552,8 +12899,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_X(name) do { \ if (current_config->name != pipe_config->name) { \ - pipe_config_mismatch(fastset, __stringify(name), \ - "(expected 0x%08x, found 0x%08x)\n", \ + pipe_config_mismatch(fastset, crtc, __stringify(name), \ + "(expected 0x%08x, found 0x%08x)", \ current_config->name, \ pipe_config->name); \ ret = false; \ @@ -12562,8 +12909,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_I(name) do { \ if (current_config->name != pipe_config->name) { \ - pipe_config_mismatch(fastset, __stringify(name), \ - "(expected %i, found %i)\n", \ + pipe_config_mismatch(fastset, crtc, __stringify(name), \ + "(expected %i, found %i)", \ current_config->name, \ pipe_config->name); \ ret = false; \ @@ -12572,8 +12919,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_BOOL(name) do { \ if (current_config->name != pipe_config->name) { \ - pipe_config_mismatch(fastset, __stringify(name), \ - "(expected %s, found %s)\n", \ + pipe_config_mismatch(fastset, crtc, __stringify(name), \ + "(expected %s, found %s)", \ yesno(current_config->name), \ yesno(pipe_config->name)); \ ret = false; \ @@ -12589,8 +12936,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \ PIPE_CONF_CHECK_BOOL(name); \ } else { \ - pipe_config_mismatch(fastset, __stringify(name), \ - "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \ + pipe_config_mismatch(fastset, crtc, __stringify(name), \ + "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)", \ yesno(current_config->name), \ yesno(pipe_config->name)); \ ret = false; \ @@ -12599,8 +12946,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_P(name) do { \ if (current_config->name != pipe_config->name) { \ - pipe_config_mismatch(fastset, __stringify(name), \ - "(expected %p, found %p)\n", \ + pipe_config_mismatch(fastset, crtc, __stringify(name), \ + "(expected %p, found %p)", \ current_config->name, \ pipe_config->name); \ ret = false; \ @@ -12611,9 +12958,9 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, if (!intel_compare_link_m_n(¤t_config->name, \ &pipe_config->name,\ !fastset)) { \ - pipe_config_mismatch(fastset, __stringify(name), \ + pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected tu %i gmch %i/%i link %i/%i, " \ - "found tu %i, gmch %i/%i link %i/%i)\n", \ + "found 
tu %i, gmch %i/%i link %i/%i)", \ current_config->name.tu, \ current_config->name.gmch_m, \ current_config->name.gmch_n, \ @@ -12638,10 +12985,10 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, &pipe_config->name, !fastset) && \ !intel_compare_link_m_n(¤t_config->alt_name, \ &pipe_config->name, !fastset)) { \ - pipe_config_mismatch(fastset, __stringify(name), \ + pipe_config_mismatch(fastset, crtc, __stringify(name), \ "(expected tu %i gmch %i/%i link %i/%i, " \ "or tu %i gmch %i/%i link %i/%i, " \ - "found tu %i, gmch %i/%i link %i/%i)\n", \ + "found tu %i, gmch %i/%i link %i/%i)", \ current_config->name.tu, \ current_config->name.gmch_m, \ current_config->name.gmch_n, \ @@ -12663,8 +13010,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_FLAGS(name, mask) do { \ if ((current_config->name ^ pipe_config->name) & (mask)) { \ - pipe_config_mismatch(fastset, __stringify(name), \ - "(%x) (expected %i, found %i)\n", \ + pipe_config_mismatch(fastset, crtc, __stringify(name), \ + "(%x) (expected %i, found %i)", \ (mask), \ current_config->name & (mask), \ pipe_config->name & (mask)); \ @@ -12674,8 +13021,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \ if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \ - pipe_config_mismatch(fastset, __stringify(name), \ - "(expected %i, found %i)\n", \ + pipe_config_mismatch(fastset, crtc, __stringify(name), \ + "(expected %i, found %i)", \ current_config->name, \ pipe_config->name); \ ret = false; \ @@ -12692,6 +13039,24 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, } \ } while (0) +#define PIPE_CONF_CHECK_COLOR_LUT(name1, name2, bit_precision) do { \ + if (current_config->name1 != pipe_config->name1) { \ + pipe_config_mismatch(fastset, crtc, __stringify(name1), \ + "(expected %i, found %i, won't compare lut values)", \ + current_config->name1, \ + pipe_config->name1); \ + ret = false;\ + } else { \ + if (!intel_color_lut_equal(current_config->name2, \ + pipe_config->name2, pipe_config->name1, \ + bit_precision)) { \ + pipe_config_mismatch(fastset, crtc, __stringify(name2), \ + "hw_state doesn't match sw_state"); \ + ret = false; \ + } \ + } \ +} while (0) + #define PIPE_CONF_QUIRK(quirk) \ ((current_config->quirks | pipe_config->quirks) & (quirk)) @@ -12730,6 +13095,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(pixel_multiplier); PIPE_CONF_CHECK_I(output_format); + PIPE_CONF_CHECK_I(dc3co_exitline); PIPE_CONF_CHECK_BOOL(has_hdmi_sink); if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) @@ -12738,6 +13104,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_BOOL(hdmi_scrambling); PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio); PIPE_CONF_CHECK_BOOL(has_infoframe); + PIPE_CONF_CHECK_BOOL(fec_enable); PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio); @@ -12787,6 +13154,11 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_X(csc_mode); PIPE_CONF_CHECK_BOOL(gamma_enable); PIPE_CONF_CHECK_BOOL(csc_enable); + + bp_gamma = intel_color_get_gamma_bit_precision(pipe_config); + if (bp_gamma) + PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, base.gamma_lut, bp_gamma); + } PIPE_CONF_CHECK_BOOL(double_wide); @@ -12842,6 +13214,9 @@ intel_pipe_config_compare(const struct intel_crtc_state 
*current_config, PIPE_CONF_CHECK_INFOFRAME(hdmi); PIPE_CONF_CHECK_INFOFRAME(drm); + PIPE_CONF_CHECK_I(sync_mode_slaves_mask); + PIPE_CONF_CHECK_I(master_transcoder); + #undef PIPE_CONF_CHECK_X #undef PIPE_CONF_CHECK_I #undef PIPE_CONF_CHECK_BOOL @@ -12849,6 +13224,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, #undef PIPE_CONF_CHECK_P #undef PIPE_CONF_CHECK_FLAGS #undef PIPE_CONF_CHECK_CLOCK_FUZZY +#undef PIPE_CONF_CHECK_COLOR_LUT #undef PIPE_CONF_QUIRK return ret; @@ -13160,7 +13536,7 @@ intel_verify_planes(struct intel_atomic_state *state) for_each_new_intel_plane_in_state(state, plane, plane_state, i) - assert_plane(plane, plane_state->slave || + assert_plane(plane, plane_state->planar_slave || plane_state->base.visible); } @@ -13276,10 +13652,15 @@ intel_modeset_verify_disabled(struct drm_i915_private *dev_priv, verify_disabled_dpll_state(dev_priv); } -static void update_scanline_offset(const struct intel_crtc_state *crtc_state) +static void +intel_crtc_update_active_timings(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + + drm_calc_timestamping_constants(&crtc->base, adjusted_mode); /* * The scanline counter increments at the leading edge of hsync. @@ -13309,7 +13690,6 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state) * answer that's slightly in the future. */ if (IS_GEN(dev_priv, 2)) { - const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; int vtotal; vtotal = adjusted_mode->crtc_vtotal; @@ -13320,8 +13700,9 @@ static void update_scanline_offset(const struct intel_crtc_state *crtc_state) } else if (HAS_DDI(dev_priv) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { crtc->scanline_offset = 2; - } else + } else { crtc->scanline_offset = 1; + } } static void intel_modeset_clear_plls(struct intel_atomic_state *state) @@ -13403,158 +13784,43 @@ static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state) return 0; } -static int intel_lock_all_pipes(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc *crtc; - - /* Add all pipes to the state */ - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state; - - crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - } - - return 0; -} - -static int intel_modeset_all_pipes(struct intel_atomic_state *state) -{ - struct drm_i915_private *dev_priv = to_i915(state->base.dev); - struct intel_crtc *crtc; - - /* - * Add all pipes to the state, and force - * a modeset on all the active ones. 
- */ - for_each_intel_crtc(&dev_priv->drm, crtc) { - struct intel_crtc_state *crtc_state; - int ret; - - crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); - - if (!crtc_state->base.active || needs_modeset(crtc_state)) - continue; - - crtc_state->base.mode_changed = true; - - ret = drm_atomic_add_affected_connectors(&state->base, - &crtc->base); - if (ret) - return ret; - - ret = drm_atomic_add_affected_planes(&state->base, - &crtc->base); - if (ret) - return ret; - } - - return 0; -} - static int intel_modeset_checks(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *old_crtc_state, *new_crtc_state; struct intel_crtc *crtc; - int ret = 0, i; - - if (!check_digital_port_conflicts(state)) { - DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); - return -EINVAL; - } + int ret, i; /* keep the current setting */ if (!state->cdclk.force_min_cdclk_changed) state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk; state->modeset = true; - state->active_crtcs = dev_priv->active_crtcs; + state->active_pipes = dev_priv->active_pipes; state->cdclk.logical = dev_priv->cdclk.logical; state->cdclk.actual = dev_priv->cdclk.actual; - state->cdclk.pipe = INVALID_PIPE; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { if (new_crtc_state->base.active) - state->active_crtcs |= 1 << i; + state->active_pipes |= BIT(crtc->pipe); else - state->active_crtcs &= ~(1 << i); + state->active_pipes &= ~BIT(crtc->pipe); if (old_crtc_state->base.active != new_crtc_state->base.active) - state->active_pipe_changes |= drm_crtc_mask(&crtc->base); + state->active_pipe_changes |= BIT(crtc->pipe); } - /* - * See if the config requires any additional preparation, e.g. - * to adjust global state with pipes off. We need to do this - * here so we can get the modeset_pipe updated config for the new - * mode set on this crtc. For other crtcs we need to use the - * adjusted_mode bits in the crtc directly. - */ - if (dev_priv->display.modeset_calc_cdclk) { - enum pipe pipe; - - ret = dev_priv->display.modeset_calc_cdclk(state); - if (ret < 0) + if (state->active_pipe_changes) { + ret = intel_atomic_lock_global_state(state); + if (ret) return ret; - - /* - * Writes to dev_priv->cdclk.logical must protected by - * holding all the crtc locks, even if we don't end up - * touching the hardware - */ - if (intel_cdclk_changed(&dev_priv->cdclk.logical, - &state->cdclk.logical)) { - ret = intel_lock_all_pipes(state); - if (ret < 0) - return ret; - } - - if (is_power_of_2(state->active_crtcs)) { - struct intel_crtc *crtc; - struct intel_crtc_state *crtc_state; - - pipe = ilog2(state->active_crtcs); - crtc = intel_get_crtc_for_pipe(dev_priv, pipe); - crtc_state = intel_atomic_get_new_crtc_state(state, crtc); - if (crtc_state && needs_modeset(crtc_state)) - pipe = INVALID_PIPE; - } else { - pipe = INVALID_PIPE; - } - - /* All pipes must be switched off while we change the cdclk. 
*/ - if (pipe != INVALID_PIPE && - intel_cdclk_needs_cd2x_update(dev_priv, - &dev_priv->cdclk.actual, - &state->cdclk.actual)) { - ret = intel_lock_all_pipes(state); - if (ret < 0) - return ret; - - state->cdclk.pipe = pipe; - } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual, - &state->cdclk.actual)) { - ret = intel_modeset_all_pipes(state); - if (ret < 0) - return ret; - - state->cdclk.pipe = INVALID_PIPE; - } - - DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n", - state->cdclk.logical.cdclk, - state->cdclk.actual.cdclk); - DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n", - state->cdclk.logical.voltage_level, - state->cdclk.actual.voltage_level); } + ret = intel_modeset_calc_cdclk(state); + if (ret) + return ret; + intel_modeset_clear_plls(state); if (IS_HASWELL(dev_priv)) @@ -13603,6 +13869,114 @@ static void intel_crtc_check_fastset(const struct intel_crtc_state *old_crtc_sta new_crtc_state->has_drrs = old_crtc_state->has_drrs; } +static int intel_crtc_add_planes_to_state(struct intel_atomic_state *state, + struct intel_crtc *crtc, + u8 plane_ids_mask) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_plane *plane; + + for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { + struct intel_plane_state *plane_state; + + if ((plane_ids_mask & BIT(plane->id)) == 0) + continue; + + plane_state = intel_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) + return PTR_ERR(plane_state); + } + + return 0; +} + +static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv) +{ + /* See {hsw,vlv,ivb}_plane_ratio() */ + return IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv) || + IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv) || + IS_IVYBRIDGE(dev_priv); +} + +static int intel_atomic_check_planes(struct intel_atomic_state *state, + bool *need_modeset) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + struct intel_crtc_state *old_crtc_state, *new_crtc_state; + struct intel_plane_state *plane_state; + struct intel_plane *plane; + struct intel_crtc *crtc; + int i, ret; + + ret = icl_add_linked_planes(state); + if (ret) + return ret; + + for_each_new_intel_plane_in_state(state, plane, plane_state, i) { + ret = intel_plane_atomic_check(state, plane); + if (ret) { + DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n", + plane->base.base.id, plane->base.name); + return ret; + } + } + + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + u8 old_active_planes, new_active_planes; + + ret = icl_check_nv12_planes(new_crtc_state); + if (ret) + return ret; + + /* + * On some platforms the number of active planes affects + * the planes' minimum cdclk calculation. Add such planes + * to the state before we compute the minimum cdclk. + */ + if (!active_planes_affects_min_cdclk(dev_priv)) + continue; + + old_active_planes = old_crtc_state->active_planes & ~BIT(PLANE_CURSOR); + new_active_planes = new_crtc_state->active_planes & ~BIT(PLANE_CURSOR); + + if (hweight8(old_active_planes) == hweight8(new_active_planes)) + continue; + + ret = intel_crtc_add_planes_to_state(state, crtc, new_active_planes); + if (ret) + return ret; + } + + /* + * active_planes bitmask has been updated, and potentially + * affected planes are part of the state. We can now + * compute the minimum cdclk for each plane. 
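A rough sketch of why the active-plane count feeds into a plane's minimum cdclk (illustrative only; the ratio below is a placeholder, the real per-platform values live in the {hsw,vlv,ivb}_plane_ratio() helpers referenced above):

static unsigned int example_plane_min_cdclk(unsigned int pixel_rate,
					    unsigned int num_active_planes)
{
	/*
	 * Placeholder ratio: pretend every plane enabled beyond the first
	 * costs an extra 1/8 of cdclk headroom. The real ratios are
	 * platform specific and live in {hsw,vlv,ivb}_plane_ratio().
	 */
	unsigned int num = 8 + (num_active_planes > 1 ? num_active_planes - 1 : 0);
	unsigned int den = 8;

	return DIV_ROUND_UP(pixel_rate * num, den);
}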
+ */ + for_each_new_intel_plane_in_state(state, plane, plane_state, i) + *need_modeset |= intel_plane_calc_min_cdclk(state, plane); + + return 0; +} + +static int intel_atomic_check_crtcs(struct intel_atomic_state *state) +{ + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + int i; + + for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) { + int ret = intel_crtc_atomic_check(state, crtc); + if (ret) { + DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n", + crtc->base.base.id, crtc->base.name); + return ret; + } + } + + return 0; +} + /** * intel_atomic_check - validate state object * @dev: drm device @@ -13616,7 +13990,7 @@ static int intel_atomic_check(struct drm_device *dev, struct intel_crtc_state *old_crtc_state, *new_crtc_state; struct intel_crtc *crtc; int ret, i; - bool any_ms = state->cdclk.force_min_cdclk_changed; + bool any_ms = false; /* Catch I915_MODE_FLAG_INHERITED */ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, @@ -13650,10 +14024,22 @@ static int intel_atomic_check(struct drm_device *dev, any_ms = true; } + if (any_ms && !check_digital_port_conflicts(state)) { + DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n"); + ret = -EINVAL; + goto fail; + } + ret = drm_dp_mst_atomic_check(&state->base); if (ret) goto fail; + any_ms |= state->cdclk.force_min_cdclk_changed; + + ret = intel_atomic_check_planes(state, &any_ms); + if (ret) + goto fail; + if (any_ms) { ret = intel_modeset_checks(state); if (ret) @@ -13662,11 +14048,7 @@ static int intel_atomic_check(struct drm_device *dev, state->cdclk.logical = dev_priv->cdclk.logical; } - ret = icl_add_linked_planes(state); - if (ret) - goto fail; - - ret = drm_atomic_helper_check_planes(dev, &state->base); + ret = intel_atomic_check_crtcs(state); if (ret) goto fail; @@ -13724,20 +14106,100 @@ u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc) return crtc->base.funcs->get_vblank_counter(&crtc->base); } +void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + if (!IS_GEN(dev_priv, 2)) + intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); + + if (crtc_state->has_pch_encoder) { + enum pipe pch_transcoder = + intel_crtc_pch_transcoder(crtc); + + intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); + } +} + +static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + /* + * Update pipe size and adjust fitter if needed: the reason for this is + * that in compute_mode_changes we check the native mode (not the pfit + * mode) to see if we can flip rather than do a full mode set. In the + * fastboot case, we'll flip, but if we don't update the pipesrc and + * pfit state, we'll end up with a big fb scanned out into the wrong + * sized surface. 
+ */ + intel_set_pipe_src_size(new_crtc_state); + + /* on skylake this is done by detaching scalers */ + if (INTEL_GEN(dev_priv) >= 9) { + skl_detach_scalers(new_crtc_state); + + if (new_crtc_state->pch_pfit.enabled) + skylake_pfit_enable(new_crtc_state); + } else if (HAS_PCH_SPLIT(dev_priv)) { + if (new_crtc_state->pch_pfit.enabled) + ironlake_pfit_enable(new_crtc_state); + else if (old_crtc_state->pch_pfit.enabled) + ironlake_pfit_disable(old_crtc_state); + } + + if (INTEL_GEN(dev_priv) >= 11) + icl_set_pipe_chicken(crtc); +} + +static void commit_pipe_config(struct intel_atomic_state *state, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + bool modeset = needs_modeset(new_crtc_state); + + /* + * During modesets pipe configuration was programmed as the + * CRTC was enabled. + */ + if (!modeset) { + if (new_crtc_state->base.color_mgmt_changed || + new_crtc_state->update_pipe) + intel_color_commit(new_crtc_state); + + if (INTEL_GEN(dev_priv) >= 9) + skl_detach_scalers(new_crtc_state); + + if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) + bdw_set_pipemisc(new_crtc_state); + + if (new_crtc_state->update_pipe) + intel_pipe_fastset(old_crtc_state, new_crtc_state); + } + + if (dev_priv->display.atomic_update_watermarks) + dev_priv->display.atomic_update_watermarks(state, + new_crtc_state); +} + static void intel_update_crtc(struct intel_crtc *crtc, struct intel_atomic_state *state, struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { - struct drm_device *dev = state->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); bool modeset = needs_modeset(new_crtc_state); struct intel_plane_state *new_plane_state = intel_atomic_get_new_plane_state(state, to_intel_plane(crtc->base.primary)); if (modeset) { - update_scanline_offset(new_crtc_state); + intel_crtc_update_active_timings(new_crtc_state); + dev_priv->display.crtc_enable(new_crtc_state, state); /* vblanks work again, re-enable pipe CRC. */ @@ -13759,17 +14221,151 @@ static void intel_update_crtc(struct intel_crtc *crtc, else if (new_plane_state) intel_fbc_enable(crtc, new_crtc_state, new_plane_state); - intel_begin_crtc_commit(state, crtc); + /* Perform vblank evasion around commit operation */ + intel_pipe_update_start(new_crtc_state); + + commit_pipe_config(state, old_crtc_state, new_crtc_state); if (INTEL_GEN(dev_priv) >= 9) skl_update_planes_on_crtc(state, crtc); else i9xx_update_planes_on_crtc(state, crtc); - intel_finish_crtc_commit(state, crtc); + intel_pipe_update_end(new_crtc_state); + + /* + * We usually enable FIFO underrun interrupts as part of the + * CRTC enable sequence during modesets. But when we inherit a + * valid pipe configuration from the BIOS we need to take care + * of enabling them on the CRTC's first fastset. 
+ */ + if (new_crtc_state->update_pipe && !modeset && + old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) + intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); +} + +static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *new_crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(new_crtc_state->base.crtc->dev); + enum transcoder slave_transcoder; + + WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask)); + + slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1; + return intel_get_crtc_for_pipe(dev_priv, + (enum pipe)slave_transcoder); } -static void intel_update_crtcs(struct intel_atomic_state *state) +static void intel_old_crtc_state_disables(struct intel_atomic_state *state, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state, + struct intel_crtc *crtc) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + + intel_crtc_disable_planes(state, crtc); + + /* + * We need to disable pipe CRC before disabling the pipe, + * or we race against vblank off. + */ + intel_crtc_disable_pipe_crc(crtc); + + dev_priv->display.crtc_disable(old_crtc_state, state); + crtc->active = false; + intel_fbc_disable(crtc); + intel_disable_shared_dpll(old_crtc_state); + + /* + * Underruns don't always raise interrupts, + * so check manually. + */ + intel_check_cpu_fifo_underruns(dev_priv); + intel_check_pch_fifo_underruns(dev_priv); + + /* FIXME unify this for all platforms */ + if (!new_crtc_state->base.active && + !HAS_GMCH(dev_priv) && + dev_priv->display.initial_watermarks) + dev_priv->display.initial_watermarks(state, + new_crtc_state); +} + +static void intel_trans_port_sync_modeset_disables(struct intel_atomic_state *state, + struct intel_crtc *crtc, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state); + struct intel_crtc_state *new_slave_crtc_state = + intel_atomic_get_new_crtc_state(state, slave_crtc); + struct intel_crtc_state *old_slave_crtc_state = + intel_atomic_get_old_crtc_state(state, slave_crtc); + + WARN_ON(!slave_crtc || !new_slave_crtc_state || + !old_slave_crtc_state); + + /* Disable Slave first */ + intel_pre_plane_update(old_slave_crtc_state, new_slave_crtc_state); + if (old_slave_crtc_state->base.active) + intel_old_crtc_state_disables(state, + old_slave_crtc_state, + new_slave_crtc_state, + slave_crtc); + + /* Disable Master */ + intel_pre_plane_update(old_crtc_state, new_crtc_state); + if (old_crtc_state->base.active) + intel_old_crtc_state_disables(state, + old_crtc_state, + new_crtc_state, + crtc); +} + +static void intel_commit_modeset_disables(struct intel_atomic_state *state) +{ + struct intel_crtc_state *new_crtc_state, *old_crtc_state; + struct intel_crtc *crtc; + int i; + + /* + * Disable CRTC/pipes in reverse order because some features(MST in + * TGL+) requires master and slave relationship between pipes, so it + * should always pick the lowest pipe as master as it will be enabled + * first and disable in the reverse order so the master will be the + * last one to be disabled. + */ + for_each_oldnew_intel_crtc_in_state_reverse(state, crtc, old_crtc_state, + new_crtc_state, i) { + if (!needs_modeset(new_crtc_state)) + continue; + + /* In case of Transcoder port Sync master slave CRTCs can be + * assigned in any order and we need to make sure that + * slave CRTCs are disabled first and then master CRTC since + * Slave vblanks are masked till Master Vblanks. 
+ */ + if (is_trans_port_sync_mode(new_crtc_state)) { + if (is_trans_port_sync_master(new_crtc_state)) + intel_trans_port_sync_modeset_disables(state, + crtc, + old_crtc_state, + new_crtc_state); + else + continue; + } else { + intel_pre_plane_update(old_crtc_state, new_crtc_state); + + if (old_crtc_state->base.active) + intel_old_crtc_state_disables(state, + old_crtc_state, + new_crtc_state, + crtc); + } + } +} + +static void intel_commit_modeset_enables(struct intel_atomic_state *state) { struct intel_crtc *crtc; struct intel_crtc_state *old_crtc_state, *new_crtc_state; @@ -13784,14 +14380,120 @@ static void intel_update_crtcs(struct intel_atomic_state *state) } } -static void skl_update_crtcs(struct intel_atomic_state *state) +static void intel_crtc_enable_trans_port_sync(struct intel_crtc *crtc, + struct intel_atomic_state *state, + struct intel_crtc_state *new_crtc_state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + + intel_crtc_update_active_timings(new_crtc_state); + dev_priv->display.crtc_enable(new_crtc_state, state); + intel_crtc_enable_pipe_crc(crtc); +} + +static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc, + struct intel_atomic_state *state) +{ + struct drm_connector *uninitialized_var(conn); + struct drm_connector_state *conn_state; + struct intel_dp *intel_dp; + int i; + + for_each_new_connector_in_state(&state->base, conn, conn_state, i) { + if (conn_state->crtc == &crtc->base) + break; + } + intel_dp = enc_to_intel_dp(&intel_attached_encoder(conn)->base); + intel_dp_stop_link_train(intel_dp); +} + +static void intel_post_crtc_enable_updates(struct intel_crtc *crtc, + struct intel_atomic_state *state) +{ + struct intel_crtc_state *new_crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + struct intel_crtc_state *old_crtc_state = + intel_atomic_get_old_crtc_state(state, crtc); + struct intel_plane_state *new_plane_state = + intel_atomic_get_new_plane_state(state, + to_intel_plane(crtc->base.primary)); + bool modeset = needs_modeset(new_crtc_state); + + if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc) + intel_fbc_disable(crtc); + else if (new_plane_state) + intel_fbc_enable(crtc, new_crtc_state, new_plane_state); + + /* Perform vblank evasion around commit operation */ + intel_pipe_update_start(new_crtc_state); + commit_pipe_config(state, old_crtc_state, new_crtc_state); + skl_update_planes_on_crtc(state, crtc); + intel_pipe_update_end(new_crtc_state); + + /* + * We usually enable FIFO underrun interrupts as part of the + * CRTC enable sequence during modesets. But when we inherit a + * valid pipe configuration from the BIOS we need to take care + * of enabling them on the CRTC's first fastset. 
+ */ + if (new_crtc_state->update_pipe && !modeset && + old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) + intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); +} + +static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc, + struct intel_atomic_state *state, + struct intel_crtc_state *old_crtc_state, + struct intel_crtc_state *new_crtc_state) +{ + struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state); + struct intel_crtc_state *new_slave_crtc_state = + intel_atomic_get_new_crtc_state(state, slave_crtc); + struct intel_crtc_state *old_slave_crtc_state = + intel_atomic_get_old_crtc_state(state, slave_crtc); + + WARN_ON(!slave_crtc || !new_slave_crtc_state || + !old_slave_crtc_state); + + DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n", + crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id, + slave_crtc->base.name); + + /* Enable seq for slave with with DP_TP_CTL left Idle until the + * master is ready + */ + intel_crtc_enable_trans_port_sync(slave_crtc, + state, + new_slave_crtc_state); + + /* Enable seq for master with with DP_TP_CTL left Idle */ + intel_crtc_enable_trans_port_sync(crtc, + state, + new_crtc_state); + + /* Set Slave's DP_TP_CTL to Normal */ + intel_set_dp_tp_ctl_normal(slave_crtc, + state); + + /* Set Master's DP_TP_CTL To Normal */ + usleep_range(200, 400); + intel_set_dp_tp_ctl_normal(crtc, + state); + + /* Now do the post crtc enable for all master and slaves */ + intel_post_crtc_enable_updates(slave_crtc, + state); + intel_post_crtc_enable_updates(crtc, + state); +} + +static void skl_commit_modeset_enables(struct intel_atomic_state *state) { struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; struct intel_crtc_state *old_crtc_state, *new_crtc_state; unsigned int updated = 0; bool progress; - enum pipe pipe; int i; u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; u8 required_slices = state->wm_results.ddb.enabled_slices; @@ -13816,20 +14518,19 @@ static void skl_update_crtcs(struct intel_atomic_state *state) progress = false; for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + enum pipe pipe = crtc->pipe; bool vbl_wait = false; - unsigned int cmask = drm_crtc_mask(&crtc->base); - - pipe = crtc->pipe; + bool modeset = needs_modeset(new_crtc_state); - if (updated & cmask || !new_crtc_state->base.active) + if (updated & BIT(crtc->pipe) || !new_crtc_state->base.active) continue; if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb, entries, - INTEL_INFO(dev_priv)->num_pipes, i)) + INTEL_NUM_PIPES(dev_priv), i)) continue; - updated |= cmask; + updated |= BIT(pipe); entries[i] = new_crtc_state->wm.skl.ddb; /* @@ -13840,12 +14541,22 @@ static void skl_update_crtcs(struct intel_atomic_state *state) */ if (!skl_ddb_entry_equal(&new_crtc_state->wm.skl.ddb, &old_crtc_state->wm.skl.ddb) && - !new_crtc_state->base.active_changed && + !modeset && state->wm_results.dirty_pipes != updated) vbl_wait = true; - intel_update_crtc(crtc, state, old_crtc_state, - new_crtc_state); + if (modeset && is_trans_port_sync_mode(new_crtc_state)) { + if (is_trans_port_sync_master(new_crtc_state)) + intel_update_trans_port_sync_crtcs(crtc, + state, + old_crtc_state, + new_crtc_state); + else + continue; + } else { + intel_update_crtc(crtc, state, old_crtc_state, + new_crtc_state); + } if (vbl_wait) intel_wait_for_vblank(dev_priv, pipe); @@ -13934,49 +14645,18 @@ static void intel_atomic_commit_tail(struct 
intel_atomic_state *state) if (state->modeset) wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); - for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { if (needs_modeset(new_crtc_state) || new_crtc_state->update_pipe) { put_domains[crtc->pipe] = modeset_get_crtc_power_domains(new_crtc_state); } - - if (!needs_modeset(new_crtc_state)) - continue; - - intel_pre_plane_update(old_crtc_state, new_crtc_state); - - if (old_crtc_state->base.active) { - intel_crtc_disable_planes(state, crtc); - - /* - * We need to disable pipe CRC before disabling the pipe, - * or we race against vblank off. - */ - intel_crtc_disable_pipe_crc(crtc); - - dev_priv->display.crtc_disable(old_crtc_state, state); - crtc->active = false; - intel_fbc_disable(crtc); - intel_disable_shared_dpll(old_crtc_state); - - /* - * Underruns don't always raise - * interrupts, so check manually. - */ - intel_check_cpu_fifo_underruns(dev_priv); - intel_check_pch_fifo_underruns(dev_priv); - - /* FIXME unify this for all platforms */ - if (!new_crtc_state->base.active && - !HAS_GMCH(dev_priv) && - dev_priv->display.initial_watermarks) - dev_priv->display.initial_watermarks(state, - new_crtc_state); - } } + intel_commit_modeset_disables(state); + /* FIXME: Eventually get rid of our crtc->config pointer */ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) crtc->config = new_crtc_state; @@ -14017,7 +14697,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_encoders_update_prepare(state); /* Now enable the clocks, plane, pipe, and connectors that we set up. */ - dev_priv->display.update_crtcs(state); + dev_priv->display.commit_modeset_enables(state); if (state->modeset) { intel_encoders_update_complete(state); @@ -14148,6 +14828,14 @@ static void intel_atomic_track_fbs(struct intel_atomic_state *state) plane->frontbuffer_bit); } +static void assert_global_state_locked(struct drm_i915_private *dev_priv) +{ + struct intel_crtc *crtc; + + for_each_intel_crtc(&dev_priv->drm, crtc) + drm_modeset_lock_assert_held(&crtc->base.mutex); +} + static int intel_atomic_commit(struct drm_device *dev, struct drm_atomic_state *_state, bool nonblock) @@ -14213,12 +14901,14 @@ static int intel_atomic_commit(struct drm_device *dev, intel_shared_dpll_swap_state(state); intel_atomic_track_fbs(state); - if (state->modeset) { + if (state->global_state_changed) { + assert_global_state_locked(dev_priv); + memcpy(dev_priv->min_cdclk, state->min_cdclk, sizeof(state->min_cdclk)); memcpy(dev_priv->min_voltage_level, state->min_voltage_level, sizeof(state->min_voltage_level)); - dev_priv->active_crtcs = state->active_crtcs; + dev_priv->active_pipes = state->active_pipes; dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk; intel_cdclk_swap_state(state); @@ -14231,7 +14921,7 @@ static int intel_atomic_commit(struct drm_device *dev, if (nonblock && state->modeset) { queue_work(dev_priv->modeset_wq, &state->base.commit_work); } else if (nonblock) { - queue_work(system_unbound_wq, &state->base.commit_work); + queue_work(dev_priv->flip_wq, &state->base.commit_work); } else { if (state->modeset) flush_workqueue(dev_priv->modeset_wq); @@ -14260,7 +14950,7 @@ static int do_rps_boost(struct wait_queue_entry *_wait, * vblank without our intervention, so leave RPS alone. 
*/ if (!i915_request_started(rq)) - gen6_rps_boost(rq); + intel_rps_boost(rq); i915_request_put(rq); drm_crtc_vblank_put(wait->crtc); @@ -14341,7 +15031,7 @@ static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state) static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) { struct i915_sched_attr attr = { - .priority = I915_PRIORITY_DISPLAY, + .priority = I915_USER_PRIORITY(I915_PRIORITY_DISPLAY), }; i915_gem_object_wait_priority(obj, 0, &attr); @@ -14350,25 +15040,25 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj) /** * intel_prepare_plane_fb - Prepare fb for usage on plane * @plane: drm plane to prepare for - * @new_state: the plane state being prepared + * @_new_plane_state: the plane state being prepared * * Prepares a framebuffer for usage on a display plane. Generally this * involves pinning the underlying object and updating the frontbuffer tracking * bits. Some older platforms need special physical address handling for * cursor planes. * - * Must be called with struct_mutex held. - * * Returns 0 on success, negative error code on failure. */ int intel_prepare_plane_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) + struct drm_plane_state *_new_plane_state) { + struct intel_plane_state *new_plane_state = + to_intel_plane_state(_new_plane_state); struct intel_atomic_state *intel_state = - to_intel_atomic_state(new_state->state); + to_intel_atomic_state(new_plane_state->base.state); struct drm_i915_private *dev_priv = to_i915(plane->dev); - struct drm_framebuffer *fb = new_state->fb; + struct drm_framebuffer *fb = new_plane_state->base.fb; struct drm_i915_gem_object *obj = intel_fb_obj(fb); struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb); int ret; @@ -14399,9 +15089,9 @@ intel_prepare_plane_fb(struct drm_plane *plane, } } - if (new_state->fence) { /* explicit fencing */ + if (new_plane_state->base.fence) { /* explicit fencing */ ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready, - new_state->fence, + new_plane_state->base.fence, I915_FENCE_TIMEOUT, GFP_KERNEL); if (ret < 0) @@ -14415,15 +15105,8 @@ intel_prepare_plane_fb(struct drm_plane *plane, if (ret) return ret; - ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); - if (ret) { - i915_gem_object_unpin_pages(obj); - return ret; - } - - ret = intel_plane_pin_fb(to_intel_plane_state(new_state)); + ret = intel_plane_pin_fb(new_plane_state); - mutex_unlock(&dev_priv->drm.struct_mutex); i915_gem_object_unpin_pages(obj); if (ret) return ret; @@ -14431,7 +15114,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, fb_obj_bump_render_priority(obj); intel_frontbuffer_flush(obj->frontbuffer, ORIGIN_DIRTYFB); - if (!new_state->fence) { /* implicit fencing */ + if (!new_plane_state->base.fence) { /* implicit fencing */ struct dma_fence *fence; ret = i915_sw_fence_await_reservation(&intel_state->commit_ready, @@ -14443,11 +15126,13 @@ intel_prepare_plane_fb(struct drm_plane *plane, fence = dma_resv_get_excl_rcu(obj->base.resv); if (fence) { - add_rps_boost_after_vblank(new_state->crtc, fence); + add_rps_boost_after_vblank(new_plane_state->base.crtc, + fence); dma_fence_put(fence); } } else { - add_rps_boost_after_vblank(new_state->crtc, new_state->fence); + add_rps_boost_after_vblank(new_plane_state->base.crtc, + new_plane_state->base.fence); } /* @@ -14459,7 +15144,7 @@ intel_prepare_plane_fb(struct drm_plane *plane, * maximum clocks following a vblank miss (see do_rps_boost()). 
*/ if (!intel_state->rps_interactive) { - intel_rps_mark_interactive(dev_priv, true); + intel_rps_mark_interactive(&dev_priv->gt.rps, true); intel_state->rps_interactive = true; } @@ -14469,130 +15154,27 @@ intel_prepare_plane_fb(struct drm_plane *plane, /** * intel_cleanup_plane_fb - Cleans up an fb after plane use * @plane: drm plane to clean up for - * @old_state: the state from the previous modeset + * @_old_plane_state: the state from the previous modeset * * Cleans up a framebuffer that has just been removed from a plane. - * - * Must be called with struct_mutex held. */ void intel_cleanup_plane_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) + struct drm_plane_state *_old_plane_state) { + struct intel_plane_state *old_plane_state = + to_intel_plane_state(_old_plane_state); struct intel_atomic_state *intel_state = - to_intel_atomic_state(old_state->state); + to_intel_atomic_state(old_plane_state->base.state); struct drm_i915_private *dev_priv = to_i915(plane->dev); if (intel_state->rps_interactive) { - intel_rps_mark_interactive(dev_priv, false); + intel_rps_mark_interactive(&dev_priv->gt.rps, false); intel_state->rps_interactive = false; } /* Should only be called after a successful intel_prepare_plane_fb()! */ - mutex_lock(&dev_priv->drm.struct_mutex); - intel_plane_unpin_fb(to_intel_plane_state(old_state)); - mutex_unlock(&dev_priv->drm.struct_mutex); -} - -int -skl_max_scale(const struct intel_crtc_state *crtc_state, - u32 pixel_format) -{ - struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - int max_scale, mult; - int crtc_clock, max_dotclk, tmpclk1, tmpclk2; - - if (!crtc_state->base.enable) - return DRM_PLANE_HELPER_NO_SCALING; - - crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; - max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk; - - if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) - max_dotclk *= 2; - - if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock)) - return DRM_PLANE_HELPER_NO_SCALING; - - /* - * skl max scale is lower of: - * close to 3 but not 3, -1 is for that purpose - * or - * cdclk/crtc_clock - */ - mult = is_planar_yuv_format(pixel_format) ? 
2 : 3; - tmpclk1 = (1 << 16) * mult - 1; - tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock); - max_scale = min(tmpclk1, tmpclk2); - - return max_scale; -} - -static void intel_begin_crtc_commit(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - bool modeset = needs_modeset(new_crtc_state); - - /* Perform vblank evasion around commit operation */ - intel_pipe_update_start(new_crtc_state); - - if (modeset) - goto out; - - if (new_crtc_state->base.color_mgmt_changed || - new_crtc_state->update_pipe) - intel_color_commit(new_crtc_state); - - if (new_crtc_state->update_pipe) - intel_update_pipe_config(old_crtc_state, new_crtc_state); - else if (INTEL_GEN(dev_priv) >= 9) - skl_detach_scalers(new_crtc_state); - - if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) - bdw_set_pipemisc(new_crtc_state); - -out: - if (dev_priv->display.atomic_update_watermarks) - dev_priv->display.atomic_update_watermarks(state, - new_crtc_state); -} - -void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, - struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - - if (!IS_GEN(dev_priv, 2)) - intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true); - - if (crtc_state->has_pch_encoder) { - enum pipe pch_transcoder = - intel_crtc_pch_transcoder(crtc); - - intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true); - } -} - -static void intel_finish_crtc_commit(struct intel_atomic_state *state, - struct intel_crtc *crtc) -{ - struct intel_crtc_state *old_crtc_state = - intel_atomic_get_old_crtc_state(state, crtc); - struct intel_crtc_state *new_crtc_state = - intel_atomic_get_new_crtc_state(state, crtc); - - intel_pipe_update_end(new_crtc_state); - - if (new_crtc_state->update_pipe && - !needs_modeset(new_crtc_state) && - old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED) - intel_crtc_arm_fifo_underrun(crtc, new_crtc_state); + intel_plane_unpin_fb(old_plane_state); } /** @@ -14649,6 +15231,7 @@ static bool i965_plane_format_mod_supported(struct drm_plane *_plane, case DRM_FORMAT_XBGR8888: case DRM_FORMAT_XRGB2101010: case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_XBGR16161616F: return modifier == DRM_FORMAT_MOD_LINEAR || modifier == I915_FORMAT_MOD_X_TILED; default: @@ -14682,8 +15265,8 @@ static const struct drm_plane_funcs i8xx_plane_funcs = { }; static int -intel_legacy_cursor_update(struct drm_plane *plane, - struct drm_crtc *crtc, +intel_legacy_cursor_update(struct drm_plane *_plane, + struct drm_crtc *_crtc, struct drm_framebuffer *fb, int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h, @@ -14691,11 +15274,13 @@ intel_legacy_cursor_update(struct drm_plane *plane, u32 src_w, u32 src_h, struct drm_modeset_acquire_ctx *ctx) { - struct drm_i915_private *dev_priv = to_i915(crtc->dev); - struct drm_plane_state *old_plane_state, *new_plane_state; - struct intel_plane *intel_plane = to_intel_plane(plane); + struct intel_plane *plane = to_intel_plane(_plane); + struct intel_crtc *crtc = to_intel_crtc(_crtc); + struct intel_plane_state *old_plane_state = + to_intel_plane_state(plane->base.state); + struct intel_plane_state *new_plane_state; struct intel_crtc_state *crtc_state = - to_intel_crtc_state(crtc->state); + to_intel_crtc_state(crtc->base.state); 
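The skl_max_scale() helper removed above keeps its result in 16.16 fixed point: the first clamp sits just under 3.0 (or 2.0 for planar YUV), and the cdclk/crtc_clock ratio is assembled from two 8-bit shifts. A stand-alone sketch of that arithmetic, using made-up clock values, may make the fixed-point handling easier to follow:

/* User-space illustration of the 16.16 fixed-point math in the removed
 * skl_max_scale(); the clock values (kHz) are examples only. */
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int crtc_clock = 148500;	/* example pipe pixel clock */
	int max_dotclk = 594000;	/* example cdclk-derived ceiling */
	int planar_yuv = 0;		/* RGB plane in this example */

	int mult = planar_yuv ? 2 : 3;
	int tmpclk1 = (1 << 16) * mult - 1;	/* just under 3.0 (or 2.0) in 16.16 */
	int tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);	/* clock ratio in 16.16 */
	int max_scale = MIN(tmpclk1, tmpclk2);

	printf("max scale factor: %.5f\n", max_scale / 65536.0);	/* ~2.99998 with these values */
	return 0;
}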
struct intel_crtc_state *new_crtc_state; int ret; @@ -14707,14 +15292,13 @@ intel_legacy_cursor_update(struct drm_plane *plane, crtc_state->update_pipe) goto slow; - old_plane_state = plane->state; /* * Don't do an async update if there is an outstanding commit modifying * the plane. This prevents our async update's changes from getting * overridden by a previous synchronous update's state. */ - if (old_plane_state->commit && - !try_wait_for_completion(&old_plane_state->commit->hw_done)) + if (old_plane_state->base.commit && + !try_wait_for_completion(&old_plane_state->base.commit->hw_done)) goto slow; /* @@ -14722,56 +15306,51 @@ intel_legacy_cursor_update(struct drm_plane *plane, * take the slowpath. Only changing fb or position should be * in the fastpath. */ - if (old_plane_state->crtc != crtc || - old_plane_state->src_w != src_w || - old_plane_state->src_h != src_h || - old_plane_state->crtc_w != crtc_w || - old_plane_state->crtc_h != crtc_h || - !old_plane_state->fb != !fb) + if (old_plane_state->base.crtc != &crtc->base || + old_plane_state->base.src_w != src_w || + old_plane_state->base.src_h != src_h || + old_plane_state->base.crtc_w != crtc_w || + old_plane_state->base.crtc_h != crtc_h || + !old_plane_state->base.fb != !fb) goto slow; - new_plane_state = intel_plane_duplicate_state(plane); + new_plane_state = to_intel_plane_state(intel_plane_duplicate_state(&plane->base)); if (!new_plane_state) return -ENOMEM; - new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc)); + new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(&crtc->base)); if (!new_crtc_state) { ret = -ENOMEM; goto out_free; } - drm_atomic_set_fb_for_plane(new_plane_state, fb); + drm_atomic_set_fb_for_plane(&new_plane_state->base, fb); - new_plane_state->src_x = src_x; - new_plane_state->src_y = src_y; - new_plane_state->src_w = src_w; - new_plane_state->src_h = src_h; - new_plane_state->crtc_x = crtc_x; - new_plane_state->crtc_y = crtc_y; - new_plane_state->crtc_w = crtc_w; - new_plane_state->crtc_h = crtc_h; + new_plane_state->base.src_x = src_x; + new_plane_state->base.src_y = src_y; + new_plane_state->base.src_w = src_w; + new_plane_state->base.src_h = src_h; + new_plane_state->base.crtc_x = crtc_x; + new_plane_state->base.crtc_y = crtc_y; + new_plane_state->base.crtc_w = crtc_w; + new_plane_state->base.crtc_h = crtc_h; ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state, - to_intel_plane_state(old_plane_state), - to_intel_plane_state(new_plane_state)); + old_plane_state, new_plane_state); if (ret) goto out_free; - ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex); + ret = intel_plane_pin_fb(new_plane_state); if (ret) goto out_free; - ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state)); - if (ret) - goto out_unlock; - - intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_FLIP); - intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->fb), - to_intel_frontbuffer(fb), - intel_plane->frontbuffer_bit); + intel_frontbuffer_flush(to_intel_frontbuffer(new_plane_state->base.fb), ORIGIN_FLIP); + intel_frontbuffer_track(to_intel_frontbuffer(old_plane_state->base.fb), + to_intel_frontbuffer(new_plane_state->base.fb), + plane->frontbuffer_bit); /* Swap plane state */ - plane->state = new_plane_state; + plane->base.state = &new_plane_state->base; /* * We cannot swap crtc_state as it may be in use by an atomic commit or @@ -14785,27 +15364,24 @@ intel_legacy_cursor_update(struct drm_plane *plane, */ crtc_state->active_planes = 
new_crtc_state->active_planes; - if (plane->state->visible) - intel_update_plane(intel_plane, crtc_state, - to_intel_plane_state(plane->state)); + if (new_plane_state->base.visible) + intel_update_plane(plane, crtc_state, new_plane_state); else - intel_disable_plane(intel_plane, crtc_state); + intel_disable_plane(plane, crtc_state); - intel_plane_unpin_fb(to_intel_plane_state(old_plane_state)); + intel_plane_unpin_fb(old_plane_state); -out_unlock: - mutex_unlock(&dev_priv->drm.struct_mutex); out_free: if (new_crtc_state) - intel_crtc_destroy_state(crtc, &new_crtc_state->base); + intel_crtc_destroy_state(&crtc->base, &new_crtc_state->base); if (ret) - intel_plane_destroy_state(plane, new_plane_state); + intel_plane_destroy_state(&plane->base, &new_plane_state->base); else - intel_plane_destroy_state(plane, old_plane_state); + intel_plane_destroy_state(&plane->base, &old_plane_state->base); return ret; slow: - return drm_atomic_helper_update_plane(plane, crtc, fb, + return drm_atomic_helper_update_plane(&plane->base, &crtc->base, fb, crtc_x, crtc_y, crtc_w, crtc_h, src_x, src_y, src_w, src_h, ctx); } @@ -14846,7 +15422,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) const u64 *modifiers; const u32 *formats; int num_formats; - int ret; + int ret, zpos; if (INTEL_GEN(dev_priv) >= 9) return skl_universal_plane_create(dev_priv, pipe, @@ -14876,8 +15452,26 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) } if (INTEL_GEN(dev_priv) >= 4) { - formats = i965_primary_formats; - num_formats = ARRAY_SIZE(i965_primary_formats); + /* + * WaFP16GammaEnabling:ivb + * "Workaround : When using the 64-bit format, the plane + * output on each color channel has one quarter amplitude. + * It can be brought up to full amplitude by using pipe + * gamma correction or pipe color space conversion to + * multiply the plane output by four." + * + * There is no dedicated plane gamma for the primary plane, + * and using the pipe gamma/csc could conflict with other + * planes, so we choose not to expose fp16 on IVB primary + * planes. HSW primary planes no longer have this problem. 
+ */ + if (IS_IVYBRIDGE(dev_priv)) { + formats = ivb_primary_formats; + num_formats = ARRAY_SIZE(ivb_primary_formats); + } else { + formats = i965_primary_formats; + num_formats = ARRAY_SIZE(i965_primary_formats); + } modifiers = i9xx_format_modifiers; plane->max_stride = i9xx_plane_max_stride; @@ -14886,6 +15480,15 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->get_hw_state = i9xx_plane_get_hw_state; plane->check_plane = i9xx_plane_check; + if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + plane->min_cdclk = hsw_plane_min_cdclk; + else if (IS_IVYBRIDGE(dev_priv)) + plane->min_cdclk = ivb_plane_min_cdclk; + else if (IS_CHERRYVIEW(dev_priv) || IS_VALLEYVIEW(dev_priv)) + plane->min_cdclk = vlv_plane_min_cdclk; + else + plane->min_cdclk = i9xx_plane_min_cdclk; + plane_funcs = &i965_plane_funcs; } else { formats = i8xx_primary_formats; @@ -14897,6 +15500,7 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) plane->disable_plane = i9xx_disable_plane; plane->get_hw_state = i9xx_plane_get_hw_state; plane->check_plane = i9xx_plane_check; + plane->min_cdclk = i9xx_plane_min_cdclk; plane_funcs = &i8xx_plane_funcs; } @@ -14935,6 +15539,9 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe) DRM_MODE_ROTATE_0, supported_rotations); + zpos = 0; + drm_plane_create_zpos_immutable_property(&plane->base, zpos); + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); return plane; @@ -14951,7 +15558,7 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, { unsigned int possible_crtcs; struct intel_plane *cursor; - int ret; + int ret, zpos; cursor = intel_plane_alloc(); if (IS_ERR(cursor)) @@ -15000,6 +15607,9 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv, DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180); + zpos = RUNTIME_INFO(dev_priv)->num_sprites[pipe] + 1; + drm_plane_create_zpos_immutable_property(&cursor->base, zpos); + drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs); return cursor; @@ -15075,12 +15685,12 @@ static const struct drm_crtc_funcs i965_crtc_funcs = { .disable_vblank = i965_disable_vblank, }; -static const struct drm_crtc_funcs i945gm_crtc_funcs = { +static const struct drm_crtc_funcs i915gm_crtc_funcs = { INTEL_CRTC_FUNCS, .get_vblank_counter = i915_get_vblank_counter, - .enable_vblank = i945gm_enable_vblank, - .disable_vblank = i945gm_disable_vblank, + .enable_vblank = i915gm_enable_vblank, + .disable_vblank = i915gm_disable_vblank, }; static const struct drm_crtc_funcs i915_crtc_funcs = { @@ -15151,8 +15761,8 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) funcs = &g4x_crtc_funcs; else if (IS_GEN(dev_priv, 4)) funcs = &i965_crtc_funcs; - else if (IS_I945GM(dev_priv)) - funcs = &i945gm_crtc_funcs; + else if (IS_I945GM(dev_priv) || IS_I915GM(dev_priv)) + funcs = &i915gm_crtc_funcs; else if (IS_GEN(dev_priv, 3)) funcs = &i915_crtc_funcs; else @@ -15187,8 +15797,6 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe) dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc; } - drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); - intel_color_init(intel_crtc); WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe); @@ -15223,21 +15831,32 @@ int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, return 0; } -static int intel_encoder_clones(struct intel_encoder *encoder) +static u32 intel_encoder_possible_clones(struct intel_encoder *encoder) { struct 
drm_device *dev = encoder->base.dev; struct intel_encoder *source_encoder; - int index_mask = 0; - int entry = 0; + u32 possible_clones = 0; for_each_intel_encoder(dev, source_encoder) { if (encoders_cloneable(encoder, source_encoder)) - index_mask |= (1 << entry); + possible_clones |= drm_encoder_mask(&source_encoder->base); + } - entry++; + return possible_clones; +} + +static u32 intel_encoder_possible_crtcs(struct intel_encoder *encoder) +{ + struct drm_device *dev = encoder->base.dev; + struct intel_crtc *crtc; + u32 possible_crtcs = 0; + + for_each_intel_crtc(dev, crtc) { + if (encoder->pipe_mask & BIT(crtc->pipe)) + possible_crtcs |= drm_crtc_mask(&crtc->base); } - return index_mask; + return possible_crtcs; } static bool ilk_has_edp_a(struct drm_i915_private *dev_priv) @@ -15319,13 +15938,18 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_pps_init(dev_priv); - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) return; if (INTEL_GEN(dev_priv) >= 12) { - /* TODO: initialize TC ports as well */ intel_ddi_init(dev_priv, PORT_A); intel_ddi_init(dev_priv, PORT_B); + intel_ddi_init(dev_priv, PORT_D); + intel_ddi_init(dev_priv, PORT_E); + intel_ddi_init(dev_priv, PORT_F); + intel_ddi_init(dev_priv, PORT_G); + intel_ddi_init(dev_priv, PORT_H); + intel_ddi_init(dev_priv, PORT_I); icl_dsi_init(dev_priv); } else if (IS_ELKHARTLAKE(dev_priv)) { intel_ddi_init(dev_priv, PORT_A); @@ -15535,9 +16159,10 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv) intel_psr_init(dev_priv); for_each_intel_encoder(&dev_priv->drm, encoder) { - encoder->base.possible_crtcs = encoder->crtc_mask; + encoder->base.possible_crtcs = + intel_encoder_possible_crtcs(encoder); encoder->base.possible_clones = - intel_encoder_clones(encoder); + intel_encoder_possible_clones(encoder); } intel_init_pch_refclk(dev_priv); @@ -15792,8 +16417,14 @@ intel_mode_valid(struct drm_device *dev, DRM_MODE_FLAG_CLKDIV2)) return MODE_BAD; - if (INTEL_GEN(dev_priv) >= 9 || - IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { + /* Transcoder timing limits */ + if (INTEL_GEN(dev_priv) >= 11) { + hdisplay_max = 16384; + vdisplay_max = 8192; + htotal_max = 16384; + vtotal_max = 8192; + } else if (INTEL_GEN(dev_priv) >= 9 || + IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) { hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */ vdisplay_max = 4096; htotal_max = 8192; @@ -15822,6 +16453,56 @@ intel_mode_valid(struct drm_device *dev, mode->vtotal > vtotal_max) return MODE_V_ILLEGAL; + if (INTEL_GEN(dev_priv) >= 5) { + if (mode->hdisplay < 64 || + mode->htotal - mode->hdisplay < 32) + return MODE_H_ILLEGAL; + + if (mode->vtotal - mode->vdisplay < 5) + return MODE_V_ILLEGAL; + } else { + if (mode->htotal - mode->hdisplay < 32) + return MODE_H_ILLEGAL; + + if (mode->vtotal - mode->vdisplay < 3) + return MODE_V_ILLEGAL; + } + + return MODE_OK; +} + +enum drm_mode_status +intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, + const struct drm_display_mode *mode) +{ + int plane_width_max, plane_height_max; + + /* + * intel_mode_valid() should be + * sufficient on older platforms. + */ + if (INTEL_GEN(dev_priv) < 9) + return MODE_OK; + + /* + * Most people will probably want a fullscreen + * plane so let's not advertize modes that are + * too big for that. 
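The checks added to intel_mode_valid() above boil down to a few comparisons per platform generation. A stand-alone restatement of the gen11+ transcoder maxima and the gen5+ blanking minimums quoted above, applied to one example mode, could look like the following; mode_ok_gen11() is a hypothetical helper and the timings are just standard 3840x2160 values used for illustration:

/* Illustration only: the gen11+ transcoder limits and gen5+ blanking
 * minimums from the hunk above, restated as a user-space check. */
#include <stdbool.h>
#include <stdio.h>

static bool mode_ok_gen11(int hdisplay, int htotal, int vdisplay, int vtotal)
{
	if (hdisplay > 16384 || htotal > 16384 ||	/* transcoder H limits */
	    vdisplay > 8192 || vtotal > 8192)		/* transcoder V limits */
		return false;

	if (hdisplay < 64 || htotal - hdisplay < 32)	/* minimum H blanking */
		return false;

	if (vtotal - vdisplay < 5)			/* minimum V blanking */
		return false;

	return true;
}

int main(void)
{
	/* 3840x2160 with 4400x2250 totals */
	printf("3840x2160: %s\n",
	       mode_ok_gen11(3840, 4400, 2160, 2250) ? "MODE_OK" : "rejected");
	return 0;
}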
+ */ + if (INTEL_GEN(dev_priv) >= 11) { + plane_width_max = 5120; + plane_height_max = 4320; + } else { + plane_width_max = 5120; + plane_height_max = 4096; + } + + if (mode->hdisplay > plane_width_max) + return MODE_H_ILLEGAL; + + if (mode->vdisplay > plane_height_max) + return MODE_V_ILLEGAL; + return MODE_OK; } @@ -15925,47 +16606,17 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv) } if (INTEL_GEN(dev_priv) >= 9) - dev_priv->display.update_crtcs = skl_update_crtcs; - else - dev_priv->display.update_crtcs = intel_update_crtcs; -} - -static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv) -{ - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - return VLV_VGACNTRL; - else if (INTEL_GEN(dev_priv) >= 5) - return CPU_VGACNTRL; + dev_priv->display.commit_modeset_enables = skl_commit_modeset_enables; else - return VGACNTRL; -} - -/* Disable the VGA plane that we never use */ -static void i915_disable_vga(struct drm_i915_private *dev_priv) -{ - struct pci_dev *pdev = dev_priv->drm.pdev; - u8 sr1; - i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); + dev_priv->display.commit_modeset_enables = intel_commit_modeset_enables; - /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ - vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); - outb(SR01, VGA_SR_INDEX); - sr1 = inb(VGA_SR_DATA); - outb(sr1 | 1<<5, VGA_SR_DATA); - vga_put(pdev, VGA_RSRC_LEGACY_IO); - udelay(300); - - I915_WRITE(vga_reg, VGA_DISP_DISABLE); - POSTING_READ(vga_reg); } -void intel_modeset_init_hw(struct drm_device *dev) +void intel_modeset_init_hw(struct drm_i915_private *i915) { - struct drm_i915_private *dev_priv = to_i915(dev); - - intel_update_cdclk(dev_priv); - intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); - dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw; + intel_update_cdclk(i915); + intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK"); + i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw; } /* @@ -16125,114 +16776,111 @@ out: return ret; } -int intel_modeset_init(struct drm_device *dev) +static void intel_mode_config_init(struct drm_i915_private *i915) { - struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe; - struct intel_crtc *crtc; - int ret; + struct drm_mode_config *mode_config = &i915->drm.mode_config; - dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); + drm_mode_config_init(&i915->drm); - drm_mode_config_init(dev); + mode_config->min_width = 0; + mode_config->min_height = 0; - ret = intel_bw_init(dev_priv); - if (ret) - return ret; - - dev->mode_config.min_width = 0; - dev->mode_config.min_height = 0; - - dev->mode_config.preferred_depth = 24; - dev->mode_config.prefer_shadow = 1; - - dev->mode_config.allow_fb_modifiers = true; - - dev->mode_config.funcs = &intel_mode_funcs; - - init_llist_head(&dev_priv->atomic_helper.free_list); - INIT_WORK(&dev_priv->atomic_helper.free_work, - intel_atomic_helper_free_state_worker); + mode_config->preferred_depth = 24; + mode_config->prefer_shadow = 1; - intel_init_quirks(dev_priv); + mode_config->allow_fb_modifiers = true; - intel_fbc_init(dev_priv); - - intel_init_pm(dev_priv); - - /* - * There may be no VBT; and if the BIOS enabled SSC we can - * just keep using it to avoid unnecessary flicker. Whereas if the - * BIOS isn't using it, don't assume it will work even if the VBT - * indicates as much. 
- */ - if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) { - bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) & - DREF_SSC1_ENABLE); - - if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) { - DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n", - bios_lvds_use_ssc ? "en" : "dis", - dev_priv->vbt.lvds_use_ssc ? "en" : "dis"); - dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc; - } - } + mode_config->funcs = &intel_mode_funcs; /* * Maximum framebuffer dimensions, chosen to match * the maximum render engine surface size on gen4+. */ - if (INTEL_GEN(dev_priv) >= 7) { - dev->mode_config.max_width = 16384; - dev->mode_config.max_height = 16384; - } else if (INTEL_GEN(dev_priv) >= 4) { - dev->mode_config.max_width = 8192; - dev->mode_config.max_height = 8192; - } else if (IS_GEN(dev_priv, 3)) { - dev->mode_config.max_width = 4096; - dev->mode_config.max_height = 4096; + if (INTEL_GEN(i915) >= 7) { + mode_config->max_width = 16384; + mode_config->max_height = 16384; + } else if (INTEL_GEN(i915) >= 4) { + mode_config->max_width = 8192; + mode_config->max_height = 8192; + } else if (IS_GEN(i915, 3)) { + mode_config->max_width = 4096; + mode_config->max_height = 4096; } else { - dev->mode_config.max_width = 2048; - dev->mode_config.max_height = 2048; + mode_config->max_width = 2048; + mode_config->max_height = 2048; } - if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) { - dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512; - dev->mode_config.cursor_height = 1023; - } else if (IS_GEN(dev_priv, 2)) { - dev->mode_config.cursor_width = 64; - dev->mode_config.cursor_height = 64; + if (IS_I845G(i915) || IS_I865G(i915)) { + mode_config->cursor_width = IS_I845G(i915) ? 64 : 512; + mode_config->cursor_height = 1023; + } else if (IS_GEN(i915, 2)) { + mode_config->cursor_width = 64; + mode_config->cursor_height = 64; } else { - dev->mode_config.cursor_width = 256; - dev->mode_config.cursor_height = 256; + mode_config->cursor_width = 256; + mode_config->cursor_height = 256; } +} - DRM_DEBUG_KMS("%d display pipe%s available.\n", - INTEL_INFO(dev_priv)->num_pipes, - INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : ""); +int intel_modeset_init(struct drm_i915_private *i915) +{ + struct drm_device *dev = &i915->drm; + enum pipe pipe; + struct intel_crtc *crtc; + int ret; - for_each_pipe(dev_priv, pipe) { - ret = intel_crtc_init(dev_priv, pipe); - if (ret) { - drm_mode_config_cleanup(dev); - return ret; + i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0); + i915->flip_wq = alloc_workqueue("i915_flip", WQ_HIGHPRI | + WQ_UNBOUND, WQ_UNBOUND_MAX_ACTIVE); + + intel_mode_config_init(i915); + + ret = intel_bw_init(i915); + if (ret) + return ret; + + init_llist_head(&i915->atomic_helper.free_list); + INIT_WORK(&i915->atomic_helper.free_work, + intel_atomic_helper_free_state_worker); + + intel_init_quirks(i915); + + intel_fbc_init(i915); + + intel_init_pm(i915); + + intel_panel_sanitize_ssc(i915); + + intel_gmbus_setup(i915); + + DRM_DEBUG_KMS("%d display pipe%s available.\n", + INTEL_NUM_PIPES(i915), + INTEL_NUM_PIPES(i915) > 1 ? 
"s" : ""); + + if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { + for_each_pipe(i915, pipe) { + ret = intel_crtc_init(i915, pipe); + if (ret) { + drm_mode_config_cleanup(dev); + return ret; + } } } intel_shared_dpll_init(dev); - intel_update_fdi_pll_freq(dev_priv); + intel_update_fdi_pll_freq(i915); - intel_update_czclk(dev_priv); - intel_modeset_init_hw(dev); + intel_update_czclk(i915); + intel_modeset_init_hw(i915); - intel_hdcp_component_init(dev_priv); + intel_hdcp_component_init(i915); - if (dev_priv->max_cdclk_freq == 0) - intel_update_max_cdclk(dev_priv); + if (i915->max_cdclk_freq == 0) + intel_update_max_cdclk(i915); /* Just disable it once at startup */ - i915_disable_vga(dev_priv); - intel_setup_outputs(dev_priv); + intel_vga_disable(i915); + intel_setup_outputs(i915); drm_modeset_lock_all(dev); intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx); @@ -16251,8 +16899,7 @@ int intel_modeset_init(struct drm_device *dev) * can even allow for smooth boot transitions if the BIOS * fb is large enough for the active pipe configuration. */ - dev_priv->display.get_initial_plane_config(crtc, - &plane_config); + i915->display.get_initial_plane_config(crtc, &plane_config); /* * If the fb is shared between multiple heads, we'll @@ -16266,7 +16913,7 @@ int intel_modeset_init(struct drm_device *dev) * Note that we need to do this after reconstructing the BIOS fb's * since the watermark calculation done here will use pstate->fb. */ - if (!HAS_GMCH(dev_priv)) + if (!HAS_GMCH(i915)) sanitize_watermarks(dev); /* @@ -16591,39 +17238,6 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) icl_sanitize_encoder_pll_mapping(encoder); } -void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv) -{ - i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv); - - if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { - DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); - i915_disable_vga(dev_priv); - } -} - -void i915_redisable_vga(struct drm_i915_private *dev_priv) -{ - intel_wakeref_t wakeref; - - /* - * This function can be called both from intel_modeset_setup_hw_state or - * at a very early point in our resume sequence, where the power well - * structures are not yet restored. Since this function is at a very - * paranoid "someone might have enabled VGA while we were not looking" - * level, just check if the power well is enabled instead of trying to - * follow the "don't touch the power well if we don't need it" policy - * the rest of the driver uses. 
- */ - wakeref = intel_display_power_get_if_enabled(dev_priv, - POWER_DOMAIN_VGA); - if (!wakeref) - return; - - i915_redisable_vga_power_on(dev_priv); - - intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref); -} - /* FIXME read out full plane state for all planes */ static void readout_plane_state(struct drm_i915_private *dev_priv) { @@ -16667,7 +17281,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct drm_connector_list_iter conn_iter; int i; - dev_priv->active_crtcs = 0; + dev_priv->active_pipes = 0; for_each_intel_crtc(dev, crtc) { struct intel_crtc_state *crtc_state = @@ -16684,7 +17298,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) crtc->active = crtc_state->base.active; if (crtc_state->base.active) - dev_priv->active_crtcs |= 1 << crtc->pipe; + dev_priv->active_pipes |= BIT(crtc->pipe); DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n", crtc->base.base.id, crtc->base.name, @@ -16744,24 +17358,28 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) drm_connector_list_iter_begin(dev, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (connector->get_hw_state(connector)) { + struct intel_crtc_state *crtc_state; + struct intel_crtc *crtc; + connector->base.dpms = DRM_MODE_DPMS_ON; encoder = connector->encoder; connector->base.encoder = &encoder->base; - if (encoder->base.crtc && - encoder->base.crtc->state->active) { + crtc = to_intel_crtc(encoder->base.crtc); + crtc_state = crtc ? to_intel_crtc_state(crtc->base.state) : NULL; + + if (crtc_state && crtc_state->base.active) { /* * This has to be done during hardware readout * because anything calling .crtc_disable may * rely on the connector_mask being accurate. */ - encoder->base.crtc->state->connector_mask |= + crtc_state->base.connector_mask |= drm_connector_mask(&connector->base); - encoder->base.crtc->state->encoder_mask |= + crtc_state->base.encoder_mask |= drm_encoder_mask(&encoder->base); } - } else { connector->base.dpms = DRM_MODE_DPMS_OFF; connector->base.encoder = NULL; @@ -16780,13 +17398,16 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) struct intel_plane *plane; int min_cdclk = 0; - memset(&crtc->base.mode, 0, sizeof(crtc->base.mode)); if (crtc_state->base.active) { - intel_mode_from_pipe_config(&crtc->base.mode, crtc_state); - crtc->base.mode.hdisplay = crtc_state->pipe_src_w; - crtc->base.mode.vdisplay = crtc_state->pipe_src_h; - intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state); - WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode)); + struct drm_display_mode mode; + + intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, + crtc_state); + + mode = crtc_state->base.adjusted_mode; + mode.hdisplay = crtc_state->pipe_src_w; + mode.vdisplay = crtc_state->pipe_src_h; + WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->base, &mode)); /* * The initial mode needs to be set in order to keep @@ -16801,21 +17422,9 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) intel_crtc_compute_pixel_rate(crtc_state); - if (dev_priv->display.modeset_calc_cdclk) { - min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); - if (WARN_ON(min_cdclk < 0)) - min_cdclk = 0; - } - - drm_calc_timestamping_constants(&crtc->base, - &crtc_state->base.adjusted_mode); - update_scanline_offset(crtc_state); + intel_crtc_update_active_timings(crtc_state); } - dev_priv->min_cdclk[crtc->pipe] = min_cdclk; - dev_priv->min_voltage_level[crtc->pipe] = - 
crtc_state->min_voltage_level; - for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) { const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); @@ -16827,8 +17436,34 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) if (plane_state->base.visible) crtc_state->data_rate[plane->id] = 4 * crtc_state->pixel_rate; + /* + * FIXME don't have the fb yet, so can't + * use plane->min_cdclk() :( + */ + if (plane_state->base.visible && plane->min_cdclk) { + if (crtc_state->double_wide || + INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + crtc_state->min_cdclk[plane->id] = + DIV_ROUND_UP(crtc_state->pixel_rate, 2); + else + crtc_state->min_cdclk[plane->id] = + crtc_state->pixel_rate; + } + DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n", + plane->base.base.id, plane->base.name, + crtc_state->min_cdclk[plane->id]); + } + + if (crtc_state->base.active) { + min_cdclk = intel_crtc_compute_min_cdclk(crtc_state); + if (WARN_ON(min_cdclk < 0)) + min_cdclk = 0; } + dev_priv->min_cdclk[crtc->pipe] = min_cdclk; + dev_priv->min_voltage_level[crtc->pipe] = + crtc_state->min_voltage_level; + intel_bw_crtc_update(bw_state, crtc_state); intel_pipe_config_sanity_check(dev_priv, crtc_state); @@ -17069,13 +17704,13 @@ void intel_display_resume(struct drm_device *dev) drm_atomic_state_put(state); } -static void intel_hpd_poll_fini(struct drm_device *dev) +static void intel_hpd_poll_fini(struct drm_i915_private *i915) { struct intel_connector *connector; struct drm_connector_list_iter conn_iter; /* Kill all the work that may have been queued by hpd. */ - drm_connector_list_iter_begin(dev, &conn_iter); + drm_connector_list_iter_begin(&i915->drm, &conn_iter); for_each_intel_connector_iter(connector, &conn_iter) { if (connector->modeset_retry_work.func) cancel_work_sync(&connector->modeset_retry_work); @@ -17087,78 +17722,49 @@ static void intel_hpd_poll_fini(struct drm_device *dev) drm_connector_list_iter_end(&conn_iter); } -void intel_modeset_driver_remove(struct drm_device *dev) +void intel_modeset_driver_remove(struct drm_i915_private *i915) { - struct drm_i915_private *dev_priv = to_i915(dev); + flush_workqueue(i915->flip_wq); + flush_workqueue(i915->modeset_wq); - flush_workqueue(dev_priv->modeset_wq); - - flush_work(&dev_priv->atomic_helper.free_work); - WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list)); + flush_work(&i915->atomic_helper.free_work); + WARN_ON(!llist_empty(&i915->atomic_helper.free_list)); /* * Interrupts and polling as the first thing to avoid creating havoc. * Too much stuff here (turning of connectors, ...) would * experience fancy races otherwise. */ - intel_irq_uninstall(dev_priv); + intel_irq_uninstall(i915); /* * Due to the hpd irq storm handling the hotplug work can re-arm the * poll handlers. Hence disable polling after hpd handling is shut down. 
*/ - intel_hpd_poll_fini(dev); + intel_hpd_poll_fini(i915); /* poll work can call into fbdev, hence clean that up afterwards */ - intel_fbdev_fini(dev_priv); + intel_fbdev_fini(i915); intel_unregister_dsm_handler(); - intel_fbc_global_disable(dev_priv); + intel_fbc_global_disable(i915); /* flush any delayed tasks or pending work */ flush_scheduled_work(); - intel_hdcp_component_fini(dev_priv); - - drm_mode_config_cleanup(dev); + intel_hdcp_component_fini(i915); - intel_overlay_cleanup(dev_priv); + drm_mode_config_cleanup(&i915->drm); - intel_gmbus_teardown(dev_priv); + intel_overlay_cleanup(i915); - destroy_workqueue(dev_priv->modeset_wq); + intel_gmbus_teardown(i915); - intel_fbc_cleanup_cfb(dev_priv); -} + destroy_workqueue(i915->flip_wq); + destroy_workqueue(i915->modeset_wq); -/* - * set vga decode state - true == enable VGA decode - */ -int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state) -{ - unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; - u16 gmch_ctrl; - - if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) { - DRM_ERROR("failed to read control word\n"); - return -EIO; - } - - if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state) - return 0; - - if (state) - gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; - else - gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; - - if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) { - DRM_ERROR("failed to write control word\n"); - return -EIO; - } - - return 0; + intel_fbc_cleanup_cfb(i915); } #if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) @@ -17221,7 +17827,7 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv) BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder)); - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) return NULL; error = kzalloc(sizeof(*error), GFP_ATOMIC); @@ -17300,7 +17906,7 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m, if (!error) return; - err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes); + err_printf(m, "Num Pipes: %d\n", INTEL_NUM_PIPES(dev_priv)); if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) err_printf(m, "PWR_WELL_CTL2: %08x\n", error->power_well_driver); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 01fa87ad3270..f417e0948001 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -1,5 +1,5 @@ /* - * Copyright © 2006-2017 Intel Corporation + * Copyright © 2006-2019 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), @@ -32,8 +32,10 @@ enum link_m_n_set; struct dpll; struct drm_connector; struct drm_device; +struct drm_display_mode; struct drm_encoder; struct drm_file; +struct drm_format_info; struct drm_framebuffer; struct drm_i915_error_state_buf; struct drm_i915_gem_object; @@ -52,6 +54,7 @@ struct intel_plane; struct intel_plane_state; struct intel_remapped_info; struct intel_rotation_info; +struct intel_crtc_state; enum i915_gpio { GPIOA, @@ -91,6 +94,7 @@ enum pipe { #define pipe_name(p) ((p) + 'A') enum transcoder { + INVALID_TRANSCODER = -1, /* * The following transcoders have a 1:1 transcoder -> pipe mapping, * keep their values fixed: the code assumes that TRANSCODER_A=0, the @@ -182,6 +186,24 @@ enum plane_id { for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ 
for_each_if((__crtc)->plane_ids_mask & BIT(__p)) +enum port { + PORT_NONE = -1, + + PORT_A = 0, + PORT_B, + PORT_C, + PORT_D, + PORT_E, + PORT_F, + PORT_G, + PORT_H, + PORT_I, + + I915_MAX_PORTS +}; + +#define port_name(p) ((p) + 'A') + /* * Ports identifier referenced from other drivers. * Expected to remain stable over time @@ -251,6 +273,7 @@ enum aux_ch { AUX_CH_D, AUX_CH_E, /* ICL+ */ AUX_CH_F, + AUX_CH_G, }; #define aux_ch_name(a) ((a) + 'A') @@ -289,10 +312,10 @@ enum phy_fia { }; #define for_each_pipe(__dev_priv, __p) \ - for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) + for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) #define for_each_pipe_masked(__dev_priv, __p, __mask) \ - for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) \ + for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \ for_each_if((__mask) & BIT(__p)) #define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \ @@ -330,7 +353,7 @@ enum phy_fia { &(dev)->mode_config.plane_list, \ base.head) \ for_each_if((plane_mask) & \ - drm_plane_mask(&intel_plane->base))) + drm_plane_mask(&intel_plane->base)) #define for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) \ list_for_each_entry(intel_plane, \ @@ -411,6 +434,23 @@ enum phy_fia { (__i)++) \ for_each_if(crtc) +#define for_each_oldnew_intel_crtc_in_state_reverse(__state, crtc, old_crtc_state, new_crtc_state, __i) \ + for ((__i) = (__state)->base.dev->mode_config.num_crtc - 1; \ + (__i) >= 0 && \ + ((crtc) = to_intel_crtc((__state)->base.crtcs[__i].ptr), \ + (old_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].old_state), \ + (new_crtc_state) = to_intel_crtc_state((__state)->base.crtcs[__i].new_state), 1); \ + (__i)--) \ + for_each_if(crtc) + +#define intel_atomic_crtc_state_for_each_plane_state( \ + plane, plane_state, \ + crtc_state) \ + for_each_intel_plane_mask(((crtc_state)->base.state->dev), (plane), \ + ((crtc_state)->base.plane_mask)) \ + for_each_if ((plane_state = \ + to_intel_plane_state(__drm_atomic_get_current_plane_state((crtc_state)->base.state, &plane->base)))) + void intel_link_compute_m_n(u16 bpp, int nlanes, int pixel_clock, int link_clock, struct intel_link_m_n *m_n, @@ -420,7 +460,11 @@ void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv); u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, u32 pixel_format, u64 modifier); bool intel_plane_can_remap(const struct intel_plane_state *plane_state); +enum drm_mode_status +intel_mode_valid_max_plane_size(struct drm_i915_private *dev_priv, + const struct drm_display_mode *mode); enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port); +bool is_trans_port_sync_mode(const struct intel_crtc_state *state); void intel_plane_destroy(struct drm_plane *plane); void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe); @@ -464,7 +508,6 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv, struct intel_digital_port *dport, unsigned int expected_mask); int intel_get_load_detect_pipe(struct drm_connector *connector, - const struct drm_display_mode *mode, struct intel_load_detect_pipe *old, struct drm_modeset_acquire_ctx *ctx); void intel_release_load_detect_pipe(struct drm_connector *connector, @@ -499,8 +542,6 @@ void intel_dp_get_m_n(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n); -void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp, - const struct intel_crtc_state 
*crtc_state); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, struct dpll *best_clock); @@ -520,8 +561,6 @@ void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc, u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_center); int skl_update_scaler_crtc(struct intel_crtc_state *crtc_state); -int skl_max_scale(const struct intel_crtc_state *crtc_state, - u32 pixel_format); u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state); @@ -544,13 +583,10 @@ void intel_display_print_error_state(struct drm_i915_error_state_buf *e, struct intel_display_error_state *error); /* modesetting */ -void intel_modeset_init_hw(struct drm_device *dev); -int intel_modeset_init(struct drm_device *dev); -void intel_modeset_driver_remove(struct drm_device *dev); -int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state); +void intel_modeset_init_hw(struct drm_i915_private *i915); +int intel_modeset_init(struct drm_i915_private *i915); +void intel_modeset_driver_remove(struct drm_i915_private *i915); void intel_display_resume(struct drm_device *dev); -void i915_redisable_vga(struct drm_i915_private *dev_priv); -void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv); void intel_init_pch_refclk(struct drm_i915_private *dev_priv); /* modesetting asserts */ diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index c002f234ff31..ce1b64f4dd44 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -3,8 +3,6 @@ * Copyright © 2019 Intel Corporation */ -#include <linux/vgaarb.h> - #include "display/intel_crt.h" #include "display/intel_dp.h" @@ -19,16 +17,14 @@ #include "intel_hotplug.h" #include "intel_sideband.h" #include "intel_tc.h" +#include "intel_vga.h" bool intel_display_power_well_is_enabled(struct drm_i915_private *dev_priv, enum i915_power_well_id power_well_id); const char * -intel_display_power_domain_str(struct drm_i915_private *i915, - enum intel_display_power_domain domain) +intel_display_power_domain_str(enum intel_display_power_domain domain) { - bool ddi_tc_ports = IS_GEN(i915, 12); - switch (domain) { case POWER_DOMAIN_DISPLAY_CORE: return "DISPLAY_CORE"; @@ -71,23 +67,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915, case POWER_DOMAIN_PORT_DDI_C_LANES: return "PORT_DDI_C_LANES"; case POWER_DOMAIN_PORT_DDI_D_LANES: - BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_LANES != - POWER_DOMAIN_PORT_DDI_TC1_LANES); - return ddi_tc_ports ? "PORT_DDI_TC1_LANES" : "PORT_DDI_D_LANES"; + return "PORT_DDI_D_LANES"; case POWER_DOMAIN_PORT_DDI_E_LANES: - BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_LANES != - POWER_DOMAIN_PORT_DDI_TC2_LANES); - return ddi_tc_ports ? "PORT_DDI_TC2_LANES" : "PORT_DDI_E_LANES"; + return "PORT_DDI_E_LANES"; case POWER_DOMAIN_PORT_DDI_F_LANES: - BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_LANES != - POWER_DOMAIN_PORT_DDI_TC3_LANES); - return ddi_tc_ports ? 
"PORT_DDI_TC3_LANES" : "PORT_DDI_F_LANES"; - case POWER_DOMAIN_PORT_DDI_TC4_LANES: - return "PORT_DDI_TC4_LANES"; - case POWER_DOMAIN_PORT_DDI_TC5_LANES: - return "PORT_DDI_TC5_LANES"; - case POWER_DOMAIN_PORT_DDI_TC6_LANES: - return "PORT_DDI_TC6_LANES"; + return "PORT_DDI_F_LANES"; + case POWER_DOMAIN_PORT_DDI_G_LANES: + return "PORT_DDI_G_LANES"; + case POWER_DOMAIN_PORT_DDI_H_LANES: + return "PORT_DDI_H_LANES"; + case POWER_DOMAIN_PORT_DDI_I_LANES: + return "PORT_DDI_I_LANES"; case POWER_DOMAIN_PORT_DDI_A_IO: return "PORT_DDI_A_IO"; case POWER_DOMAIN_PORT_DDI_B_IO: @@ -95,23 +85,17 @@ intel_display_power_domain_str(struct drm_i915_private *i915, case POWER_DOMAIN_PORT_DDI_C_IO: return "PORT_DDI_C_IO"; case POWER_DOMAIN_PORT_DDI_D_IO: - BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_D_IO != - POWER_DOMAIN_PORT_DDI_TC1_IO); - return ddi_tc_ports ? "PORT_DDI_TC1_IO" : "PORT_DDI_D_IO"; + return "PORT_DDI_D_IO"; case POWER_DOMAIN_PORT_DDI_E_IO: - BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_E_IO != - POWER_DOMAIN_PORT_DDI_TC2_IO); - return ddi_tc_ports ? "PORT_DDI_TC2_IO" : "PORT_DDI_E_IO"; + return "PORT_DDI_E_IO"; case POWER_DOMAIN_PORT_DDI_F_IO: - BUILD_BUG_ON(POWER_DOMAIN_PORT_DDI_F_IO != - POWER_DOMAIN_PORT_DDI_TC3_IO); - return ddi_tc_ports ? "PORT_DDI_TC3_IO" : "PORT_DDI_F_IO"; - case POWER_DOMAIN_PORT_DDI_TC4_IO: - return "PORT_DDI_TC4_IO"; - case POWER_DOMAIN_PORT_DDI_TC5_IO: - return "PORT_DDI_TC5_IO"; - case POWER_DOMAIN_PORT_DDI_TC6_IO: - return "PORT_DDI_TC6_IO"; + return "PORT_DDI_F_IO"; + case POWER_DOMAIN_PORT_DDI_G_IO: + return "PORT_DDI_G_IO"; + case POWER_DOMAIN_PORT_DDI_H_IO: + return "PORT_DDI_H_IO"; + case POWER_DOMAIN_PORT_DDI_I_IO: + return "PORT_DDI_I_IO"; case POWER_DOMAIN_PORT_DSI: return "PORT_DSI"; case POWER_DOMAIN_PORT_CRT: @@ -129,34 +113,33 @@ intel_display_power_domain_str(struct drm_i915_private *i915, case POWER_DOMAIN_AUX_C: return "AUX_C"; case POWER_DOMAIN_AUX_D: - BUILD_BUG_ON(POWER_DOMAIN_AUX_D != POWER_DOMAIN_AUX_TC1); - return ddi_tc_ports ? "AUX_TC1" : "AUX_D"; + return "AUX_D"; case POWER_DOMAIN_AUX_E: - BUILD_BUG_ON(POWER_DOMAIN_AUX_E != POWER_DOMAIN_AUX_TC2); - return ddi_tc_ports ? "AUX_TC2" : "AUX_E"; + return "AUX_E"; case POWER_DOMAIN_AUX_F: - BUILD_BUG_ON(POWER_DOMAIN_AUX_F != POWER_DOMAIN_AUX_TC3); - return ddi_tc_ports ? 
"AUX_TC3" : "AUX_F"; - case POWER_DOMAIN_AUX_TC4: - return "AUX_TC4"; - case POWER_DOMAIN_AUX_TC5: - return "AUX_TC5"; - case POWER_DOMAIN_AUX_TC6: - return "AUX_TC6"; + return "AUX_F"; + case POWER_DOMAIN_AUX_G: + return "AUX_G"; + case POWER_DOMAIN_AUX_H: + return "AUX_H"; + case POWER_DOMAIN_AUX_I: + return "AUX_I"; case POWER_DOMAIN_AUX_IO_A: return "AUX_IO_A"; - case POWER_DOMAIN_AUX_TBT1: - return "AUX_TBT1"; - case POWER_DOMAIN_AUX_TBT2: - return "AUX_TBT2"; - case POWER_DOMAIN_AUX_TBT3: - return "AUX_TBT3"; - case POWER_DOMAIN_AUX_TBT4: - return "AUX_TBT4"; - case POWER_DOMAIN_AUX_TBT5: - return "AUX_TBT5"; - case POWER_DOMAIN_AUX_TBT6: - return "AUX_TBT6"; + case POWER_DOMAIN_AUX_C_TBT: + return "AUX_C_TBT"; + case POWER_DOMAIN_AUX_D_TBT: + return "AUX_D_TBT"; + case POWER_DOMAIN_AUX_E_TBT: + return "AUX_E_TBT"; + case POWER_DOMAIN_AUX_F_TBT: + return "AUX_F_TBT"; + case POWER_DOMAIN_AUX_G_TBT: + return "AUX_G_TBT"; + case POWER_DOMAIN_AUX_H_TBT: + return "AUX_H_TBT"; + case POWER_DOMAIN_AUX_I_TBT: + return "AUX_I_TBT"; case POWER_DOMAIN_GMBUS: return "GMBUS"; case POWER_DOMAIN_INIT: @@ -283,23 +266,8 @@ bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv, u8 irq_pipe_mask, bool has_vga) { - struct pci_dev *pdev = dev_priv->drm.pdev; - - /* - * After we re-enable the power well, if we touch VGA register 0x3d5 - * we'll get unclaimed register interrupts. This stops after we write - * anything to the VGA MSR register. The vgacon module uses this - * register all the time, so if we unbind our driver and, as a - * consequence, bind vgacon, we'll get stuck in an infinite loop at - * console_unlock(). So make here we touch the VGA MSR register, making - * sure vgacon can keep working normally without triggering interrupts - * and error messages. 
- */ - if (has_vga) { - vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); - outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); - vga_put(pdev, VGA_RSRC_LEGACY_IO); - } + if (has_vga) + intel_vga_reset_io_mem(dev_priv); if (irq_pipe_mask) gen8_irq_power_well_post_enable(dev_priv, irq_pipe_mask); @@ -578,6 +546,8 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv, #endif +#define TGL_AUX_PW_TO_TC_PORT(pw_idx) ((pw_idx) - TGL_PW_CTL_IDX_AUX_TC1) + static void icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) @@ -594,6 +564,17 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, I915_WRITE(DP_AUX_CH_CTL(aux_ch), val); hsw_power_well_enable(dev_priv, power_well); + + if (INTEL_GEN(dev_priv) >= 12 && !power_well->desc->hsw.is_tc_tbt) { + enum tc_port tc_port; + + tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx); + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2)); + + if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), + DKL_CMN_UC_DW27_UC_HEALTH, 1)) + DRM_WARN("Timeout waiting TC uC health\n"); + } } static void @@ -714,7 +695,11 @@ static u32 gen9_dc_mask(struct drm_i915_private *dev_priv) u32 mask; mask = DC_STATE_EN_UPTO_DC5; - if (INTEL_GEN(dev_priv) >= 11) + + if (INTEL_GEN(dev_priv) >= 12) + mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6 + | DC_STATE_EN_DC9; + else if (IS_GEN(dev_priv, 11)) mask |= DC_STATE_EN_UPTO_DC6 | DC_STATE_EN_DC9; else if (IS_GEN9_LP(dev_priv)) mask |= DC_STATE_EN_DC9; @@ -784,6 +769,52 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state) dev_priv->csr.dc_state = val & mask; } +static u32 +sanitize_target_dc_state(struct drm_i915_private *dev_priv, + u32 target_dc_state) +{ + u32 states[] = { + DC_STATE_EN_UPTO_DC6, + DC_STATE_EN_UPTO_DC5, + DC_STATE_EN_DC3CO, + DC_STATE_DISABLE, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(states) - 1; i++) { + if (target_dc_state != states[i]) + continue; + + if (dev_priv->csr.allowed_dc_mask & target_dc_state) + break; + + target_dc_state = states[i + 1]; + } + + return target_dc_state; +} + +static void tgl_enable_dc3co(struct drm_i915_private *dev_priv) +{ + DRM_DEBUG_KMS("Enabling DC3CO\n"); + gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO); +} + +static void tgl_disable_dc3co(struct drm_i915_private *dev_priv) +{ + u32 val; + + DRM_DEBUG_KMS("Disabling DC3CO\n"); + val = I915_READ(DC_STATE_EN); + val &= ~DC_STATE_DC3CO_STATUS; + I915_WRITE(DC_STATE_EN, val); + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); + /* + * Delay of 200us DC3CO Exit time B.Spec 49196 + */ + usleep_range(200, 210); +} + static void bxt_enable_dc9(struct drm_i915_private *dev_priv) { assert_can_enable_dc9(dev_priv); @@ -839,6 +870,51 @@ lookup_power_well(struct drm_i915_private *dev_priv, return &dev_priv->power_domains.power_wells[0]; } +/** + * intel_display_power_set_target_dc_state - Set target dc state. + * @dev_priv: i915 device + * @state: state which needs to be set as target_dc_state. + * + * This function set the "DC off" power well target_dc_state, + * based upon this target_dc_stste, "DC off" power well will + * enable desired DC state. 
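As a hedged illustration of the helper added just above: sanitize_target_dc_state() resolves a requested DC state against csr.allowed_dc_mask by walking an ordered fallback list until an allowed state is found. The small standalone program below mirrors that walk; the *_SKETCH constants are made-up stand-ins for the driver's DC_STATE_EN_* masks, not the real values.

#include <stddef.h>
#include <stdio.h>

#define DC_OFF_SKETCH    0x0u  /* stands in for DC_STATE_DISABLE */
#define DC3CO_SKETCH     0x1u
#define UPTO_DC5_SKETCH  0x2u
#define UPTO_DC6_SKETCH  0x4u

static unsigned int pick_dc_state(unsigned int allowed_mask, unsigned int target)
{
	static const unsigned int states[] = {
		UPTO_DC6_SKETCH, UPTO_DC5_SKETCH, DC3CO_SKETCH, DC_OFF_SKETCH,
	};
	size_t i;

	/* Fall back through the list until the requested state is allowed. */
	for (i = 0; i + 1 < sizeof(states) / sizeof(states[0]); i++) {
		if (target != states[i])
			continue;
		if (allowed_mask & target)
			break;
		target = states[i + 1];
	}
	return target;
}

int main(void)
{
	/* DC6 requested, but only DC5 is allowed: resolves to DC5. */
	printf("0x%x\n", pick_dc_state(UPTO_DC5_SKETCH, UPTO_DC6_SKETCH));
	return 0;
}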
+ */ +void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, + u32 state) +{ + struct i915_power_well *power_well; + bool dc_off_enabled; + struct i915_power_domains *power_domains = &dev_priv->power_domains; + + mutex_lock(&power_domains->lock); + power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF); + + if (WARN_ON(!power_well)) + goto unlock; + + state = sanitize_target_dc_state(dev_priv, state); + + if (state == dev_priv->csr.target_dc_state) + goto unlock; + + dc_off_enabled = power_well->desc->ops->is_enabled(dev_priv, + power_well); + /* + * If DC off power well is disabled, need to enable and disable the + * DC off power well to effect target DC state. + */ + if (!dc_off_enabled) + power_well->desc->ops->enable(dev_priv, power_well); + + dev_priv->csr.target_dc_state = state; + + if (!dc_off_enabled) + power_well->desc->ops->disable(dev_priv, power_well); + +unlock: + mutex_unlock(&power_domains->lock); +} + static void assert_can_enable_dc5(struct drm_i915_private *dev_priv) { bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv, @@ -951,7 +1027,8 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv) static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv, struct i915_power_well *power_well) { - return (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0; + return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 && + (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0); } static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv) @@ -967,6 +1044,11 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv) { struct intel_cdclk_state cdclk_state = {}; + if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) { + tgl_disable_dc3co(dev_priv); + return; + } + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); dev_priv->display.get_cdclk(dev_priv, &cdclk_state); @@ -999,10 +1081,17 @@ static void gen9_dc_off_power_well_disable(struct drm_i915_private *dev_priv, if (!dev_priv->csr.dmc_payload) return; - if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC6) + switch (dev_priv->csr.target_dc_state) { + case DC_STATE_EN_DC3CO: + tgl_enable_dc3co(dev_priv); + break; + case DC_STATE_EN_UPTO_DC6: skl_enable_dc6(dev_priv); - else if (dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5) + break; + case DC_STATE_EN_UPTO_DC5: gen9_enable_dc5(dev_priv); + break; + } } static void i9xx_power_well_sync_hw_noop(struct drm_i915_private *dev_priv, @@ -1208,7 +1297,7 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) intel_crt_reset(&encoder->base); } - i915_redisable_vga_power_on(dev_priv); + intel_vga_redisable_power_on(dev_priv); intel_pps_unlock_regs_wa(dev_priv); } @@ -1718,15 +1807,12 @@ __async_put_domains_state_ok(struct i915_power_domains *power_domains) static void print_power_domains(struct i915_power_domains *power_domains, const char *prefix, u64 mask) { - struct drm_i915_private *i915 = - container_of(power_domains, struct drm_i915_private, - power_domains); enum intel_display_power_domain domain; DRM_DEBUG_DRIVER("%s (%lu):\n", prefix, hweight64(mask)); for_each_power_domain(domain, mask) DRM_DEBUG_DRIVER("%s use_count %d\n", - intel_display_power_domain_str(i915, domain), + intel_display_power_domain_str(domain), power_domains->domain_use_count[domain]); } @@ -1896,7 +1982,7 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv, { struct i915_power_domains *power_domains; struct i915_power_well *power_well; - 
const char *name = intel_display_power_domain_str(dev_priv, domain); + const char *name = intel_display_power_domain_str(domain); power_domains = &dev_priv->power_domains; @@ -2487,10 +2573,10 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_AUX_D) | \ BIT_ULL(POWER_DOMAIN_AUX_E) | \ BIT_ULL(POWER_DOMAIN_AUX_F) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ + BIT_ULL(POWER_DOMAIN_AUX_C_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_AUDIO) | \ BIT_ULL(POWER_DOMAIN_INIT)) @@ -2530,22 +2616,22 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_AUX_A)) #define ICL_AUX_B_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_B)) -#define ICL_AUX_C_IO_POWER_DOMAINS ( \ +#define ICL_AUX_C_TC1_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_C)) -#define ICL_AUX_D_IO_POWER_DOMAINS ( \ +#define ICL_AUX_D_TC2_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_D)) -#define ICL_AUX_E_IO_POWER_DOMAINS ( \ +#define ICL_AUX_E_TC3_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_E)) -#define ICL_AUX_F_IO_POWER_DOMAINS ( \ +#define ICL_AUX_F_TC4_IO_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_AUX_F)) -#define ICL_AUX_TBT1_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1)) -#define ICL_AUX_TBT2_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2)) -#define ICL_AUX_TBT3_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3)) -#define ICL_AUX_TBT4_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4)) +#define ICL_AUX_C_TBT1_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_C_TBT)) +#define ICL_AUX_D_TBT2_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) +#define ICL_AUX_E_TBT3_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) +#define ICL_AUX_F_TBT4_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) #define TGL_PW_5_POWER_DOMAINS ( \ BIT_ULL(POWER_DOMAIN_PIPE_D) | \ @@ -2565,24 +2651,24 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, BIT_ULL(POWER_DOMAIN_PIPE_B) | \ BIT_ULL(POWER_DOMAIN_TRANSCODER_B) | \ BIT_ULL(POWER_DOMAIN_PIPE_B_PANEL_FITTER) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_LANES) | \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_LANES) | \ - BIT_ULL(POWER_DOMAIN_AUX_TC1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TC2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TC3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TC4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TC5) | \ - BIT_ULL(POWER_DOMAIN_AUX_TC6) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT1) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT2) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT3) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT4) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT5) | \ - BIT_ULL(POWER_DOMAIN_AUX_TBT6) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_G_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_H_LANES) | \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_I_LANES) | \ + BIT_ULL(POWER_DOMAIN_AUX_D) | \ + BIT_ULL(POWER_DOMAIN_AUX_E) | \ + BIT_ULL(POWER_DOMAIN_AUX_F) | \ + BIT_ULL(POWER_DOMAIN_AUX_G) | \ + BIT_ULL(POWER_DOMAIN_AUX_H) | \ + BIT_ULL(POWER_DOMAIN_AUX_I) | \ + BIT_ULL(POWER_DOMAIN_AUX_D_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_E_TBT) | \ 
+ BIT_ULL(POWER_DOMAIN_AUX_F_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_G_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_H_TBT) | \ + BIT_ULL(POWER_DOMAIN_AUX_I_TBT) | \ BIT_ULL(POWER_DOMAIN_VGA) | \ BIT_ULL(POWER_DOMAIN_AUDIO) | \ BIT_ULL(POWER_DOMAIN_INIT)) @@ -2596,37 +2682,54 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, TGL_PW_2_POWER_DOMAINS | \ BIT_ULL(POWER_DOMAIN_MODESET) | \ BIT_ULL(POWER_DOMAIN_AUX_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_B) | \ + BIT_ULL(POWER_DOMAIN_AUX_C) | \ BIT_ULL(POWER_DOMAIN_INIT)) -#define TGL_DDI_IO_TC1_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC1_IO)) -#define TGL_DDI_IO_TC2_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC2_IO)) -#define TGL_DDI_IO_TC3_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC3_IO)) -#define TGL_DDI_IO_TC4_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC4_IO)) -#define TGL_DDI_IO_TC5_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC5_IO)) -#define TGL_DDI_IO_TC6_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_PORT_DDI_TC6_IO)) - -#define TGL_AUX_TC1_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TC1)) -#define TGL_AUX_TC2_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TC2)) -#define TGL_AUX_TC3_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TC3)) -#define TGL_AUX_TC4_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TC4)) -#define TGL_AUX_TC5_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TC5)) -#define TGL_AUX_TC6_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TC6)) -#define TGL_AUX_TBT5_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT5)) -#define TGL_AUX_TBT6_IO_POWER_DOMAINS ( \ - BIT_ULL(POWER_DOMAIN_AUX_TBT6)) +#define TGL_DDI_IO_D_TC1_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_D_IO)) +#define TGL_DDI_IO_E_TC2_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_E_IO)) +#define TGL_DDI_IO_F_TC3_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_F_IO)) +#define TGL_DDI_IO_G_TC4_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_G_IO)) +#define TGL_DDI_IO_H_TC5_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_H_IO)) +#define TGL_DDI_IO_I_TC6_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_PORT_DDI_I_IO)) + +#define TGL_AUX_A_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_IO_A) | \ + BIT_ULL(POWER_DOMAIN_AUX_A)) +#define TGL_AUX_B_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_B)) +#define TGL_AUX_C_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_C)) +#define TGL_AUX_D_TC1_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_D)) +#define TGL_AUX_E_TC2_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_E)) +#define TGL_AUX_F_TC3_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_F)) +#define TGL_AUX_G_TC4_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_G)) +#define TGL_AUX_H_TC5_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_H)) +#define TGL_AUX_I_TC6_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_I)) +#define TGL_AUX_D_TBT1_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_D_TBT)) +#define TGL_AUX_E_TBT2_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_E_TBT)) +#define TGL_AUX_F_TBT3_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_F_TBT)) +#define TGL_AUX_G_TBT4_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_G_TBT)) +#define TGL_AUX_H_TBT5_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_H_TBT)) +#define TGL_AUX_I_TBT6_IO_POWER_DOMAINS ( \ + BIT_ULL(POWER_DOMAIN_AUX_I_TBT)) static const struct i915_power_well_ops i9xx_always_on_power_well_ops = { .sync_hw = i9xx_power_well_sync_hw_noop, @@ -2938,7 +3041,7 @@ static const struct i915_power_well_desc skl_power_wells[] = { .name = "DC off", .domains = 
SKL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = DISP_PW_ID_NONE, + .id = SKL_DISP_DC_OFF, }, { .name = "power well 2", @@ -3020,7 +3123,7 @@ static const struct i915_power_well_desc bxt_power_wells[] = { .name = "DC off", .domains = BXT_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = DISP_PW_ID_NONE, + .id = SKL_DISP_DC_OFF, }, { .name = "power well 2", @@ -3080,7 +3183,7 @@ static const struct i915_power_well_desc glk_power_wells[] = { .name = "DC off", .domains = GLK_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = DISP_PW_ID_NONE, + .id = SKL_DISP_DC_OFF, }, { .name = "power well 2", @@ -3249,7 +3352,7 @@ static const struct i915_power_well_desc cnl_power_wells[] = { .name = "DC off", .domains = CNL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = DISP_PW_ID_NONE, + .id = SKL_DISP_DC_OFF, }, { .name = "power well 2", @@ -3377,7 +3480,7 @@ static const struct i915_power_well_desc icl_power_wells[] = { .name = "DC off", .domains = ICL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = DISP_PW_ID_NONE, + .id = SKL_DISP_DC_OFF, }, { .name = "power well 2", @@ -3484,8 +3587,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX C", - .domains = ICL_AUX_C_IO_POWER_DOMAINS, + .name = "AUX C TC1", + .domains = ICL_AUX_C_TC1_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3495,8 +3598,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX D", - .domains = ICL_AUX_D_IO_POWER_DOMAINS, + .name = "AUX D TC2", + .domains = ICL_AUX_D_TC2_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3506,8 +3609,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX E", - .domains = ICL_AUX_E_IO_POWER_DOMAINS, + .name = "AUX E TC3", + .domains = ICL_AUX_E_TC3_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3517,8 +3620,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX F", - .domains = ICL_AUX_F_IO_POWER_DOMAINS, + .name = "AUX F TC4", + .domains = ICL_AUX_F_TC4_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3528,8 +3631,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX TBT1", - .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, + .name = "AUX C TBT1", + .domains = ICL_AUX_C_TBT1_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3539,8 +3642,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX TBT2", - .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, + .name = "AUX D TBT2", + .domains = ICL_AUX_D_TBT2_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3550,8 +3653,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX TBT3", - .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, + .name = "AUX E TBT3", + .domains = ICL_AUX_E_TBT3_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3561,8 +3664,8 @@ static const struct i915_power_well_desc icl_power_wells[] = { }, }, { - .name = "AUX TBT4", - .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, + .name = "AUX F TBT4", + .domains = ICL_AUX_F_TBT4_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3610,7 +3713,7 @@ static const struct 
i915_power_well_desc tgl_power_wells[] = { .name = "DC off", .domains = TGL_DISPLAY_DC_OFF_POWER_DOMAINS, .ops = &gen9_dc_off_power_well_ops, - .id = DISP_PW_ID_NONE, + .id = SKL_DISP_DC_OFF, }, { .name = "power well 2", @@ -3667,8 +3770,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { } }, { - .name = "DDI TC1 IO", - .domains = TGL_DDI_IO_TC1_POWER_DOMAINS, + .name = "DDI D TC1 IO", + .domains = TGL_DDI_IO_D_TC1_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3677,8 +3780,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI TC2 IO", - .domains = TGL_DDI_IO_TC2_POWER_DOMAINS, + .name = "DDI E TC2 IO", + .domains = TGL_DDI_IO_E_TC2_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3687,8 +3790,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI TC3 IO", - .domains = TGL_DDI_IO_TC3_POWER_DOMAINS, + .name = "DDI F TC3 IO", + .domains = TGL_DDI_IO_F_TC3_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3697,8 +3800,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI TC4 IO", - .domains = TGL_DDI_IO_TC4_POWER_DOMAINS, + .name = "DDI G TC4 IO", + .domains = TGL_DDI_IO_G_TC4_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3707,8 +3810,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI TC5 IO", - .domains = TGL_DDI_IO_TC5_POWER_DOMAINS, + .name = "DDI H TC5 IO", + .domains = TGL_DDI_IO_H_TC5_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3717,8 +3820,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "DDI TC6 IO", - .domains = TGL_DDI_IO_TC6_POWER_DOMAINS, + .name = "DDI I TC6 IO", + .domains = TGL_DDI_IO_I_TC6_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3728,7 +3831,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, { .name = "AUX A", - .domains = ICL_AUX_A_IO_POWER_DOMAINS, + .domains = TGL_AUX_A_IO_POWER_DOMAINS, .ops = &icl_combo_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3738,7 +3841,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, { .name = "AUX B", - .domains = ICL_AUX_B_IO_POWER_DOMAINS, + .domains = TGL_AUX_B_IO_POWER_DOMAINS, .ops = &icl_combo_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3748,7 +3851,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, { .name = "AUX C", - .domains = ICL_AUX_C_IO_POWER_DOMAINS, + .domains = TGL_AUX_C_IO_POWER_DOMAINS, .ops = &icl_combo_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3757,8 +3860,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TC1", - .domains = TGL_AUX_TC1_IO_POWER_DOMAINS, + .name = "AUX D TC1", + .domains = TGL_AUX_D_TC1_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3768,8 +3871,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TC2", - .domains = TGL_AUX_TC2_IO_POWER_DOMAINS, + .name = "AUX E TC2", + .domains = TGL_AUX_E_TC2_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3779,8 +3882,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TC3", - .domains = TGL_AUX_TC3_IO_POWER_DOMAINS, + .name = "AUX F TC3", + .domains = TGL_AUX_F_TC3_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = 
DISP_PW_ID_NONE, { @@ -3790,8 +3893,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TC4", - .domains = TGL_AUX_TC4_IO_POWER_DOMAINS, + .name = "AUX G TC4", + .domains = TGL_AUX_G_TC4_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3801,8 +3904,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TC5", - .domains = TGL_AUX_TC5_IO_POWER_DOMAINS, + .name = "AUX H TC5", + .domains = TGL_AUX_H_TC5_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3812,8 +3915,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TC6", - .domains = TGL_AUX_TC6_IO_POWER_DOMAINS, + .name = "AUX I TC6", + .domains = TGL_AUX_I_TC6_IO_POWER_DOMAINS, .ops = &icl_tc_phy_aux_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3823,8 +3926,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TBT1", - .domains = ICL_AUX_TBT1_IO_POWER_DOMAINS, + .name = "AUX D TBT1", + .domains = TGL_AUX_D_TBT1_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3834,8 +3937,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TBT2", - .domains = ICL_AUX_TBT2_IO_POWER_DOMAINS, + .name = "AUX E TBT2", + .domains = TGL_AUX_E_TBT2_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3845,8 +3948,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TBT3", - .domains = ICL_AUX_TBT3_IO_POWER_DOMAINS, + .name = "AUX F TBT3", + .domains = TGL_AUX_F_TBT3_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3856,8 +3959,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TBT4", - .domains = ICL_AUX_TBT4_IO_POWER_DOMAINS, + .name = "AUX G TBT4", + .domains = TGL_AUX_G_TBT4_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3867,8 +3970,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TBT5", - .domains = TGL_AUX_TBT5_IO_POWER_DOMAINS, + .name = "AUX H TBT5", + .domains = TGL_AUX_H_TBT5_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3878,8 +3981,8 @@ static const struct i915_power_well_desc tgl_power_wells[] = { }, }, { - .name = "AUX TBT6", - .domains = TGL_AUX_TBT6_IO_POWER_DOMAINS, + .name = "AUX I TBT6", + .domains = TGL_AUX_I_TBT6_IO_POWER_DOMAINS, .ops = &hsw_power_well_ops, .id = DISP_PW_ID_NONE, { @@ -3931,14 +4034,17 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, int requested_dc; int max_dc; - if (INTEL_GEN(dev_priv) >= 11) { - max_dc = 2; + if (INTEL_GEN(dev_priv) >= 12) { + max_dc = 4; /* * DC9 has a separate HW flow from the rest of the DC states, * not depending on the DMC firmware. It's needed by system * suspend/resume, so allow it unconditionally. 
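For reference, the switch over requested_dc a little further below translates the i915.enable_dc module parameter into the allowed-DC-state mask; the sketch here is a simplified reconstruction of that mapping (mask values are illustrative stand-ins, and the DC9 handling done earlier in the function is omitted).

#include <stdio.h>

#define EN_DC3CO_SKETCH  0x1u
#define EN_DC5_SKETCH    0x2u
#define EN_DC6_SKETCH    0x4u

/* enable_dc: 1 = up to DC5, 2 = up to DC6,
 * 3 = up to DC5 plus DC3CO, 4 = up to DC6 plus DC3CO. */
static unsigned int dc_mask_for_param(int enable_dc)
{
	unsigned int mask = 0;

	switch (enable_dc) {
	case 4:
		mask = EN_DC3CO_SKETCH | EN_DC6_SKETCH;
		break;
	case 3:
		mask = EN_DC3CO_SKETCH | EN_DC5_SKETCH;
		break;
	case 2:
		mask = EN_DC6_SKETCH;
		break;
	case 1:
		mask = EN_DC5_SKETCH;
		break;
	}
	return mask;
}

int main(void)
{
	printf("enable_dc=4 -> 0x%x\n", dc_mask_for_param(4));
	return 0;
}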
*/ mask = DC_STATE_EN_DC9; + } else if (IS_GEN(dev_priv, 11)) { + max_dc = 2; + mask = DC_STATE_EN_DC9; } else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv)) { max_dc = 2; mask = 0; @@ -3957,7 +4063,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, requested_dc = enable_dc; } else if (enable_dc == -1) { requested_dc = max_dc; - } else if (enable_dc > max_dc && enable_dc <= 2) { + } else if (enable_dc > max_dc && enable_dc <= 4) { DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n", enable_dc, max_dc); requested_dc = max_dc; @@ -3966,10 +4072,20 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv, requested_dc = max_dc; } - if (requested_dc > 1) + switch (requested_dc) { + case 4: + mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC6; + break; + case 3: + mask |= DC_STATE_EN_DC3CO | DC_STATE_EN_UPTO_DC5; + break; + case 2: mask |= DC_STATE_EN_UPTO_DC6; - if (requested_dc > 0) + break; + case 1: mask |= DC_STATE_EN_UPTO_DC5; + break; + } DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask); @@ -4030,6 +4146,9 @@ int intel_power_domains_init(struct drm_i915_private *dev_priv) dev_priv->csr.allowed_dc_mask = get_allowed_dc_mask(dev_priv, i915_modparams.enable_dc); + dev_priv->csr.target_dc_state = + sanitize_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); + BUILD_BUG_ON(POWER_DOMAIN_NUM > 64); mutex_init(&power_domains->lock); @@ -5107,8 +5226,7 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915) for_each_power_domain(domain, power_well->desc->domains) DRM_DEBUG_DRIVER(" %-23s %d\n", - intel_display_power_domain_str(i915, - domain), + intel_display_power_domain_str(domain), power_domains->domain_use_count[domain]); } } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h index a50605b8b1ad..1da04f3e0fb3 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.h +++ b/drivers/gpu/drm/i915/display/intel_display_power.h @@ -36,29 +36,20 @@ enum intel_display_power_domain { POWER_DOMAIN_PORT_DDI_B_LANES, POWER_DOMAIN_PORT_DDI_C_LANES, POWER_DOMAIN_PORT_DDI_D_LANES, - POWER_DOMAIN_PORT_DDI_TC1_LANES = POWER_DOMAIN_PORT_DDI_D_LANES, POWER_DOMAIN_PORT_DDI_E_LANES, - POWER_DOMAIN_PORT_DDI_TC2_LANES = POWER_DOMAIN_PORT_DDI_E_LANES, POWER_DOMAIN_PORT_DDI_F_LANES, - POWER_DOMAIN_PORT_DDI_TC3_LANES = POWER_DOMAIN_PORT_DDI_F_LANES, - POWER_DOMAIN_PORT_DDI_TC4_LANES, - POWER_DOMAIN_PORT_DDI_TC5_LANES, - POWER_DOMAIN_PORT_DDI_TC6_LANES, + POWER_DOMAIN_PORT_DDI_G_LANES, + POWER_DOMAIN_PORT_DDI_H_LANES, + POWER_DOMAIN_PORT_DDI_I_LANES, POWER_DOMAIN_PORT_DDI_A_IO, POWER_DOMAIN_PORT_DDI_B_IO, POWER_DOMAIN_PORT_DDI_C_IO, POWER_DOMAIN_PORT_DDI_D_IO, - POWER_DOMAIN_PORT_DDI_TC1_IO = POWER_DOMAIN_PORT_DDI_D_IO, POWER_DOMAIN_PORT_DDI_E_IO, - POWER_DOMAIN_PORT_DDI_TC2_IO = POWER_DOMAIN_PORT_DDI_E_IO, POWER_DOMAIN_PORT_DDI_F_IO, - POWER_DOMAIN_PORT_DDI_TC3_IO = POWER_DOMAIN_PORT_DDI_F_IO, POWER_DOMAIN_PORT_DDI_G_IO, - POWER_DOMAIN_PORT_DDI_TC4_IO = POWER_DOMAIN_PORT_DDI_G_IO, POWER_DOMAIN_PORT_DDI_H_IO, - POWER_DOMAIN_PORT_DDI_TC5_IO = POWER_DOMAIN_PORT_DDI_H_IO, POWER_DOMAIN_PORT_DDI_I_IO, - POWER_DOMAIN_PORT_DDI_TC6_IO = POWER_DOMAIN_PORT_DDI_I_IO, POWER_DOMAIN_PORT_DSI, POWER_DOMAIN_PORT_CRT, POWER_DOMAIN_PORT_OTHER, @@ -68,21 +59,19 @@ enum intel_display_power_domain { POWER_DOMAIN_AUX_B, POWER_DOMAIN_AUX_C, POWER_DOMAIN_AUX_D, - POWER_DOMAIN_AUX_TC1 = POWER_DOMAIN_AUX_D, POWER_DOMAIN_AUX_E, - POWER_DOMAIN_AUX_TC2 = POWER_DOMAIN_AUX_E, POWER_DOMAIN_AUX_F, - 
POWER_DOMAIN_AUX_TC3 = POWER_DOMAIN_AUX_F, - POWER_DOMAIN_AUX_TC4, - POWER_DOMAIN_AUX_TC5, - POWER_DOMAIN_AUX_TC6, + POWER_DOMAIN_AUX_G, + POWER_DOMAIN_AUX_H, + POWER_DOMAIN_AUX_I, POWER_DOMAIN_AUX_IO_A, - POWER_DOMAIN_AUX_TBT1, - POWER_DOMAIN_AUX_TBT2, - POWER_DOMAIN_AUX_TBT3, - POWER_DOMAIN_AUX_TBT4, - POWER_DOMAIN_AUX_TBT5, - POWER_DOMAIN_AUX_TBT6, + POWER_DOMAIN_AUX_C_TBT, + POWER_DOMAIN_AUX_D_TBT, + POWER_DOMAIN_AUX_E_TBT, + POWER_DOMAIN_AUX_F_TBT, + POWER_DOMAIN_AUX_G_TBT, + POWER_DOMAIN_AUX_H_TBT, + POWER_DOMAIN_AUX_I_TBT, POWER_DOMAIN_GMBUS, POWER_DOMAIN_MODESET, POWER_DOMAIN_GT_IRQ, @@ -111,6 +100,7 @@ enum i915_power_well_id { SKL_DISP_PW_MISC_IO, SKL_DISP_PW_1, SKL_DISP_PW_2, + SKL_DISP_DC_OFF, }; #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A) @@ -267,10 +257,11 @@ void intel_display_power_suspend_late(struct drm_i915_private *i915); void intel_display_power_resume_early(struct drm_i915_private *i915); void intel_display_power_suspend(struct drm_i915_private *i915); void intel_display_power_resume(struct drm_i915_private *i915); +void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv, + u32 state); const char * -intel_display_power_domain_str(struct drm_i915_private *i915, - enum intel_display_power_domain domain); +intel_display_power_domain_str(enum intel_display_power_domain domain); bool intel_display_power_is_enabled(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 4075b0387c87..1a7334dbe802 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -128,7 +128,8 @@ struct intel_encoder { enum intel_output_type type; enum port port; - unsigned int cloneable; + u16 cloneable; + u8 pipe_mask; enum intel_hotplug_state (*hotplug)(struct intel_encoder *encoder, struct intel_connector *connector, bool irq_received); @@ -187,7 +188,6 @@ struct intel_encoder { * device interrupts are disabled. */ void (*suspend)(struct intel_encoder *); - int crtc_mask; enum hpd_pin hpd_pin; enum intel_display_power_domain power_domain; /* for communication with audio component; protected by av_mutex */ @@ -388,6 +388,13 @@ struct intel_hdcp { wait_queue_head_t cp_irq_queue; atomic_t cp_irq_count; int cp_irq_count_cached; + + /* + * HDCP register access for gen12+ need the transcoder associated. + * Transcoder attached to the connector could be changed at modeset. + * Hence caching the transcoder here. + */ + enum transcoder cpu_transcoder; }; struct intel_connector { @@ -481,9 +488,9 @@ struct intel_atomic_state { * but the converse is not necessarily true; simply changing a mode may * not flip the final active status of any CRTC's */ - unsigned int active_pipe_changes; + u8 active_pipe_changes; - unsigned int active_crtcs; + u8 active_pipes; /* minimum acceptable cdclk for each pipe */ int min_cdclk[I915_MAX_PIPES]; /* minimum acceptable voltage level for each pipe */ @@ -499,6 +506,14 @@ struct intel_atomic_state { bool rps_interactive; + /* + * active_pipes + * min_cdclk[] + * min_voltage_level[] + * cdclk.* + */ + bool global_state_changed; + /* Gen9+ only */ struct skl_ddb_values wm_results; @@ -552,24 +567,24 @@ struct intel_plane_state { int scaler_id; /* - * linked_plane: + * planar_linked_plane: * * ICL planar formats require 2 planes that are updated as pairs. 
* This member is used to make sure the other plane is also updated * when required, and for update_slave() to find the correct * plane_state to pass as argument. */ - struct intel_plane *linked_plane; + struct intel_plane *planar_linked_plane; /* - * slave: + * planar_slave: * If set don't update use the linked plane's state for updating * this plane during atomic commit with the update_slave() callback. * * It's also used by the watermark code to ignore wm calculations on * this plane. They're calculated by the linked plane's wm code. */ - u32 slave; + u32 planar_slave; struct drm_intel_sprite_colorkey ckey; }; @@ -759,7 +774,6 @@ struct intel_crtc_state { bool update_pipe; /* can a fast modeset be performed? */ bool disable_cxsr; bool update_wm_pre, update_wm_post; /* watermarks are updated */ - bool fb_changed; /* fb on any of the planes is changed */ bool fifo_changed; /* FIFO split is changed */ bool preload_luts; @@ -865,6 +879,7 @@ struct intel_crtc_state { bool has_psr; bool has_psr2; + u32 dc3co_exitline; /* * Frequence the dpll for the port should run at. Differs from the @@ -926,6 +941,8 @@ struct intel_crtc_state { struct intel_crtc_wm_state wm; + int min_cdclk[I915_MAX_PLANES]; + u32 data_rate[I915_MAX_PLANES]; /* Gamma mode programmed on the pipe */ @@ -980,11 +997,17 @@ struct intel_crtc_state { bool dsc_split; u16 compressed_bpp; u8 slice_count; - } dsc_params; - struct drm_dsc_config dp_dsc_cfg; + struct drm_dsc_config config; + } dsc; /* Forward Error correction State */ bool fec_enable; + + /* Pointer to master transcoder in case of tiled displays */ + enum transcoder master_transcoder; + + /* Bitmask to indicate slaves attached */ + u8 sync_mode_slaves_mask; }; struct intel_crtc { @@ -1027,6 +1050,9 @@ struct intel_crtc { /* scalers available on this crtc */ int num_scalers; + + /* per pipe DSB related info */ + struct intel_dsb dsb; }; struct intel_plane { @@ -1062,6 +1088,8 @@ struct intel_plane { bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); int (*check_plane)(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); + int (*min_cdclk)(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); }; struct intel_watermark_params { @@ -1177,6 +1205,7 @@ struct intel_dp { /* sink or branch descriptor */ struct drm_dp_desc desc; struct drm_dp_aux aux; + u32 aux_busy_last_status; u8 train_set[4]; int panel_power_up_delay; int panel_power_down_delay; @@ -1212,6 +1241,15 @@ struct intel_dp { bool can_mst; /* this port supports mst */ bool is_mst; int active_mst_links; + + /* + * DP_TP_* registers may be either on port or transcoder register space. 
+ */ + struct { + i915_reg_t dp_tp_ctl; + i915_reg_t dp_tp_status; + } regs; + /* connector directly attached - won't be use for modeset in mst world */ struct intel_connector *attached_connector; @@ -1270,6 +1308,7 @@ struct intel_digital_port { char tc_port_name[8]; enum tc_port_mode tc_mode; enum phy_fia tc_phy_fia; + u8 tc_phy_fia_idx; void (*write_infoframe)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -1510,7 +1549,7 @@ intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe) drm_wait_one_vblank(&dev_priv->drm, pipe); } static inline void -intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, int pipe) +intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe) { const struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe); diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 9b15ac4f2fb6..c61ac0c3acb5 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -68,11 +68,6 @@ #define DP_DPRX_ESI_LEN 14 -/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */ -#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER 61440 -#define DP_DSC_MIN_SUPPORTED_BPC 8 -#define DP_DSC_MAX_SUPPORTED_BPC 10 - /* DP DSC throughput values used for slice count calculations KPixels/s */ #define DP_DSC_PEAK_PIXEL_RATE 2720000 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 @@ -500,7 +495,17 @@ u32 intel_dp_mode_to_fec_clock(u32 mode_clock) DP_DSC_FEC_OVERHEAD_FACTOR); } -static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count, +static int +small_joiner_ram_size_bits(struct drm_i915_private *i915) +{ + if (INTEL_GEN(i915) >= 11) + return 7680 * 8; + else + return 6144 * 8; +} + +static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, + u32 link_clock, u32 lane_count, u32 mode_clock, u32 mode_hdisplay) { u32 bits_per_pixel, max_bpp_small_joiner_ram; @@ -517,7 +522,8 @@ static u16 intel_dp_dsc_get_output_bpp(u32 link_clock, u32 lane_count, DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel); /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */ - max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER / mode_hdisplay; + max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) / + mode_hdisplay; DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram); /* @@ -585,6 +591,25 @@ static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp, return 0; } +static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv, + int hdisplay) +{ + /* + * Older platforms don't like hdisplay==4096 with DP. + * + * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline + * and frame counter increment), but we don't get vblank interrupts, + * and the pipe underruns immediately. The link also doesn't seem + * to get trained properly. + * + * On CHV the vblank interrupts don't seem to disappear but + * otherwise the symptoms are similar. 
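A quick worked example of the small-joiner limit computed in intel_dp_dsc_get_output_bpp() above: with the gen11+ buffer of 7680 * 8 bits, a 3840-pixel-wide mode caps the compressed output at 16 bpp. The figures below are illustrative, not taken from a specific panel.

#include <stdio.h>

int main(void)
{
	unsigned int ram_bits = 7680 * 8;   /* gen11+ small joiner RAM, in bits */
	unsigned int hdisplay = 3840;       /* example mode width */

	/* Output bpp must not exceed joiner RAM (bits) / horizontal width. */
	printf("max small joiner bpp: %u\n", ram_bits / hdisplay);  /* 16 */
	return 0;
}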
+ * + * TODO: confirm the behaviour on HSW+ + */ + return hdisplay == 4096 && !HAS_DDI(dev_priv); +} + static enum drm_mode_status intel_dp_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) @@ -620,6 +645,9 @@ intel_dp_mode_valid(struct drm_connector *connector, max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes); mode_rate = intel_dp_link_required(target_clock, 18); + if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay)) + return MODE_H_ILLEGAL; + /* * Output bpp is stored in 6.4 format so right shift by 4 to get the * integer value since we support only integer values of bpp. @@ -634,7 +662,8 @@ intel_dp_mode_valid(struct drm_connector *connector, true); } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) { dsc_max_output_bpp = - intel_dp_dsc_get_output_bpp(max_link_clock, + intel_dp_dsc_get_output_bpp(dev_priv, + max_link_clock, max_lanes, target_clock, mode->hdisplay) >> 4; @@ -655,7 +684,7 @@ intel_dp_mode_valid(struct drm_connector *connector, if (mode->flags & DRM_MODE_FLAG_DBLCLK) return MODE_H_ILLEGAL; - return MODE_OK; + return intel_mode_valid_max_plane_size(dev_priv, mode); } u32 intel_dp_pack_aux(const u8 *src, int src_bytes) @@ -732,12 +761,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp) u32 DP; if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN, - "skipping pipe %c power sequencer kick due to port %c being active\n", - pipe_name(pipe), port_name(intel_dig_port->base.port))) + "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n", + pipe_name(pipe), intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name)) return; - DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n", - pipe_name(pipe), port_name(intel_dig_port->base.port)); + DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n", + pipe_name(pipe), intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); /* Preserve the BIOS-computed detected bit. This is * supposed to be read-only. @@ -855,9 +886,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp) vlv_steal_power_sequencer(dev_priv, pipe); intel_dp->pps_pipe = pipe; - DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n", + DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n", pipe_name(intel_dp->pps_pipe), - port_name(intel_dig_port->base.port)); + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); /* init power sequencer on this pipe and port */ intel_dp_init_panel_power_sequencer(intel_dp); @@ -965,13 +997,16 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp) /* didn't find one? 
just let vlv_power_sequencer_pipe() pick one when needed */ if (intel_dp->pps_pipe == INVALID_PIPE) { - DRM_DEBUG_KMS("no initial power sequencer for port %c\n", - port_name(port)); + DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); return; } - DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n", - port_name(port), pipe_name(intel_dp->pps_pipe)); + DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name, + pipe_name(intel_dp->pps_pipe)); intel_dp_init_panel_power_sequencer(intel_dp); intel_dp_init_panel_power_sequencer_registers(intel_dp, false); @@ -1144,18 +1179,20 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp); + const unsigned int timeout_ms = 10; u32 status; bool done; #define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0) done = wait_event_timeout(i915->gmbus_wait_queue, C, - msecs_to_jiffies_timeout(10)); + msecs_to_jiffies_timeout(timeout_ms)); /* just trace the final value */ trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true); if (!done) - DRM_ERROR("dp aux hw did not signal timeout!\n"); + DRM_ERROR("%s did not complete or timeout within %ums (status 0x%08x)\n", + intel_dp->aux.name, timeout_ms, status); #undef C return status; @@ -1338,13 +1375,12 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true); if (try == 3) { - static u32 last_status = -1; const u32 status = intel_uncore_read(uncore, ch_ctl); - if (status != last_status) { + if (status != intel_dp->aux_busy_last_status) { WARN(1, "dp_aux_ch not started status 0x%08x\n", status); - last_status = status; + intel_dp->aux_busy_last_status = status; } ret = -EBUSY; @@ -1636,6 +1672,7 @@ static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp) case AUX_CH_D: case AUX_CH_E: case AUX_CH_F: + case AUX_CH_G: return DP_AUX_CH_CTL(aux_ch); default: MISSING_CASE(aux_ch); @@ -1656,6 +1693,7 @@ static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index) case AUX_CH_D: case AUX_CH_E: case AUX_CH_F: + case AUX_CH_G: return DP_AUX_CH_DATA(aux_ch, index); default: MISSING_CASE(aux_ch); @@ -1834,8 +1872,14 @@ static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - return INTEL_GEN(dev_priv) >= 11 && - pipe_config->cpu_transcoder != TRANSCODER_A; + /* On TGL, FEC is supported on all Pipes */ + if (INTEL_GEN(dev_priv) >= 12) + return true; + + if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A) + return true; + + return false; } static bool intel_dp_supports_fec(struct intel_dp *intel_dp, @@ -1850,8 +1894,18 @@ static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp, { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - return INTEL_GEN(dev_priv) >= 10 && - pipe_config->cpu_transcoder != TRANSCODER_A; + if (!INTEL_INFO(dev_priv)->display.has_dsc) + return false; + + /* On TGL, DSC is supported on all Pipes */ + if (INTEL_GEN(dev_priv) >= 12) + return true; + + if (INTEL_GEN(dev_priv) >= 10 && + pipe_config->cpu_transcoder != TRANSCODER_A) + return true; + + return false; } static bool intel_dp_supports_dsc(struct intel_dp *intel_dp, @@ -2010,11 +2064,17 @@ static int 
intel_dp_dsc_compute_config(struct intel_dp *intel_dp, if (!intel_dp_supports_dsc(intel_dp, pipe_config)) return -EINVAL; - dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC, - conn_state->max_requested_bpc); + /* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */ + if (INTEL_GEN(dev_priv) >= 12) + dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc); + else + dsc_max_bpc = min_t(u8, 10, + conn_state->max_requested_bpc); pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc); - if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) { + + /* Min Input BPC for ICL+ is 8 */ + if (pipe_bpp < 8 * 3) { DRM_DEBUG_KMS("No DSC support for less than 8bpc\n"); return -EINVAL; } @@ -2029,10 +2089,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, pipe_config->lane_count = limits->max_lane_count; if (intel_dp_is_edp(intel_dp)) { - pipe_config->dsc_params.compressed_bpp = + pipe_config->dsc.compressed_bpp = min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4, pipe_config->pipe_bpp); - pipe_config->dsc_params.slice_count = + pipe_config->dsc.slice_count = drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd, true); } else { @@ -2040,7 +2100,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, u8 dsc_dp_slice_count; dsc_max_output_bpp = - intel_dp_dsc_get_output_bpp(pipe_config->port_clock, + intel_dp_dsc_get_output_bpp(dev_priv, + pipe_config->port_clock, pipe_config->lane_count, adjusted_mode->crtc_clock, adjusted_mode->crtc_hdisplay); @@ -2052,10 +2113,10 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n"); return -EINVAL; } - pipe_config->dsc_params.compressed_bpp = min_t(u16, + pipe_config->dsc.compressed_bpp = min_t(u16, dsc_max_output_bpp >> 4, pipe_config->pipe_bpp); - pipe_config->dsc_params.slice_count = dsc_dp_slice_count; + pipe_config->dsc.slice_count = dsc_dp_slice_count; } /* * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate @@ -2063,8 +2124,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, * then we need to use 2 VDSC instances. 
*/ if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) { - if (pipe_config->dsc_params.slice_count > 1) { - pipe_config->dsc_params.dsc_split = true; + if (pipe_config->dsc.slice_count > 1) { + pipe_config->dsc.dsc_split = true; } else { DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n"); return -EINVAL; @@ -2076,16 +2137,16 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp, DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d " "Compressed BPP = %d\n", pipe_config->pipe_bpp, - pipe_config->dsc_params.compressed_bpp); + pipe_config->dsc.compressed_bpp); return ret; } - pipe_config->dsc_params.compression_enable = true; + pipe_config->dsc.compression_enable = true; DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d " "Compressed Bpp = %d Slice Count = %d\n", pipe_config->pipe_bpp, - pipe_config->dsc_params.compressed_bpp, - pipe_config->dsc_params.slice_count); + pipe_config->dsc.compressed_bpp, + pipe_config->dsc.slice_count); return 0; } @@ -2159,15 +2220,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, return ret; } - if (pipe_config->dsc_params.compression_enable) { + if (pipe_config->dsc.compression_enable) { DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n", pipe_config->lane_count, pipe_config->port_clock, pipe_config->pipe_bpp, - pipe_config->dsc_params.compressed_bpp); + pipe_config->dsc.compressed_bpp); DRM_DEBUG_KMS("DP link rate required %i available %i\n", intel_dp_link_required(adjusted_mode->crtc_clock, - pipe_config->dsc_params.compressed_bpp), + pipe_config->dsc.compressed_bpp), intel_dp_max_data_rate(pipe_config->port_clock, pipe_config->lane_count)); } else { @@ -2222,6 +2283,16 @@ bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode; + /* + * Our YCbCr output is always limited range. + * crtc_state->limited_color_range only applies to RGB, + * and it must never be set for YCbCr or we risk setting + * some conflicting bits in PIPECONF which will mess up + * the colors on the monitor. 
+ */ + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) + return false; + if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { /* * See: @@ -2259,6 +2330,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, pipe_config->has_pch_encoder = true; pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; + if (lspcon->active) lspcon_ycbcr420_config(&intel_connector->base, pipe_config); else @@ -2304,6 +2376,9 @@ intel_dp_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) return -EINVAL; + if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay)) + return -EINVAL; + ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state); if (ret < 0) return ret; @@ -2311,8 +2386,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, pipe_config->limited_color_range = intel_dp_limited_color_range(pipe_config, conn_state); - if (pipe_config->dsc_params.compression_enable) - output_bpp = pipe_config->dsc_params.compressed_bpp; + if (pipe_config->dsc.compression_enable) + output_bpp = pipe_config->dsc.compressed_bpp; else output_bpp = intel_dp_output_bpp(pipe_config, pipe_config->pipe_bpp); @@ -2339,6 +2414,9 @@ intel_dp_compute_config(struct intel_encoder *encoder, intel_psr_compute_config(intel_dp, pipe_config); + intel_hdcp_transcoder_config(intel_connector, + pipe_config->cpu_transcoder); + return 0; } @@ -2366,6 +2444,9 @@ static void intel_dp_prepare(struct intel_encoder *encoder, intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST)); + intel_dp->regs.dp_tp_ctl = DP_TP_CTL(port); + intel_dp->regs.dp_tp_status = DP_TP_STATUS(port); + /* * There are four kinds of DP registers: * @@ -2567,8 +2648,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) intel_display_power_get(dev_priv, intel_aux_power_domain(intel_dig_port)); - DRM_DEBUG_KMS("Turning eDP port %c VDD on\n", - port_name(intel_dig_port->base.port)); + DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); if (!edp_have_panel_power(intel_dp)) wait_panel_power_cycle(intel_dp); @@ -2587,8 +2669,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp) * If the panel wasn't on, delay before accessing aux channel */ if (!edp_have_panel_power(intel_dp)) { - DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n", - port_name(intel_dig_port->base.port)); + DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); msleep(intel_dp->panel_power_up_delay); } @@ -2613,8 +2696,9 @@ void intel_edp_panel_vdd_on(struct intel_dp *intel_dp) vdd = false; with_pps_lock(intel_dp, wakeref) vdd = edp_panel_vdd_on(intel_dp); - I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n", - port_name(dp_to_dig_port(intel_dp)->base.port)); + I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name); } static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) @@ -2632,8 +2716,9 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp) if (!edp_have_panel_vdd(intel_dp)) return; - DRM_DEBUG_KMS("Turning eDP port %c VDD off\n", - port_name(intel_dig_port->base.port)); + DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); pp = ironlake_get_pp_control(intel_dp); pp &= ~EDP_FORCE_VDD; @@ -2695,8 +2780,9 @@ static void edp_panel_vdd_off(struct intel_dp 
*intel_dp, bool sync) if (!intel_dp_is_edp(intel_dp)) return; - I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on", - port_name(dp_to_dig_port(intel_dp)->base.port)); + I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name); intel_dp->want_panel_vdd = false; @@ -2717,12 +2803,14 @@ static void edp_panel_on(struct intel_dp *intel_dp) if (!intel_dp_is_edp(intel_dp)) return; - DRM_DEBUG_KMS("Turn eDP port %c panel power on\n", - port_name(dp_to_dig_port(intel_dp)->base.port)); + DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name); if (WARN(edp_have_panel_power(intel_dp), - "eDP port %c panel power already on\n", - port_name(dp_to_dig_port(intel_dp)->base.port))) + "[ENCODER:%d:%s] panel power already on\n", + dp_to_dig_port(intel_dp)->base.base.base.id, + dp_to_dig_port(intel_dp)->base.base.name)) return; wait_panel_power_cycle(intel_dp); @@ -2777,11 +2865,11 @@ static void edp_panel_off(struct intel_dp *intel_dp) if (!intel_dp_is_edp(intel_dp)) return; - DRM_DEBUG_KMS("Turn eDP port %c panel power off\n", - port_name(dig_port->base.port)); + DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n", + dig_port->base.base.base.id, dig_port->base.base.name); - WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n", - port_name(dig_port->base.port)); + WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n", + dig_port->base.base.base.id, dig_port->base.base.name); pp = ironlake_get_pp_control(intel_dp); /* We need to switch off panel power _and_ force vdd, for otherwise some @@ -2926,8 +3014,8 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state) bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN; I915_STATE_WARN(cur_state != state, - "DP port %c state assertion failure (expected %s, current %s)\n", - port_name(dig_port->base.port), + "[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n", + dig_port->base.base.base.id, dig_port->base.base.name, onoff(state), onoff(cur_state)); } #define assert_dp_port_disabled(d) assert_dp_port((d), false) @@ -3023,7 +3111,7 @@ void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, { int ret; - if (!crtc_state->dsc_params.compression_enable) + if (!crtc_state->dsc.compression_enable) return; ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE, @@ -3315,7 +3403,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, dp_train_pat & train_pat_mask); if (HAS_DDI(dev_priv)) { - u32 temp = I915_READ(DP_TP_CTL(port)); + u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl); if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE) temp |= DP_TP_CTL_SCRAMBLE_DISABLE; @@ -3341,7 +3429,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp, temp |= DP_TP_CTL_LINK_TRAIN_PAT4; break; } - I915_WRITE(DP_TP_CTL(port), temp); + I915_WRITE(intel_dp->regs.dp_tp_ctl, temp); } else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) || (HAS_PCH_CPT(dev_priv) && port != PORT_A)) { @@ -3505,8 +3593,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp) * port select always when logically disconnecting a power sequencer * from a port. 
*/ - DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n", - pipe_name(pipe), port_name(intel_dig_port->base.port)); + DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n", + pipe_name(pipe), intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); I915_WRITE(pp_on_reg, 0); POSTING_READ(pp_on_reg); @@ -3522,17 +3611,18 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv, for_each_intel_dp(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); - enum port port = encoder->port; WARN(intel_dp->active_pipe == pipe, - "stealing pipe %c power sequencer from active (e)DP port %c\n", - pipe_name(pipe), port_name(port)); + "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n", + pipe_name(pipe), encoder->base.base.id, + encoder->base.name); if (intel_dp->pps_pipe != pipe) continue; - DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n", - pipe_name(pipe), port_name(port)); + DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n", + pipe_name(pipe), encoder->base.base.id, + encoder->base.name); /* make sure vdd is off before we steal it */ vlv_detach_power_sequencer(intel_dp); @@ -3574,8 +3664,9 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder, /* now it's all ours */ intel_dp->pps_pipe = crtc->pipe; - DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n", - pipe_name(intel_dp->pps_pipe), port_name(encoder->port)); + DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n", + pipe_name(intel_dp->pps_pipe), encoder->base.base.id, + encoder->base.name); /* init power sequencer on this pipe and port */ intel_dp_init_panel_power_sequencer(intel_dp); @@ -4039,22 +4130,22 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp) if (!HAS_DDI(dev_priv)) return; - val = I915_READ(DP_TP_CTL(port)); + val = I915_READ(intel_dp->regs.dp_tp_ctl); val &= ~DP_TP_CTL_LINK_TRAIN_MASK; val |= DP_TP_CTL_LINK_TRAIN_IDLE; - I915_WRITE(DP_TP_CTL(port), val); + I915_WRITE(intel_dp->regs.dp_tp_ctl, val); /* - * On PORT_A we can have only eDP in SST mode. There the only reason - * we need to set idle transmission mode is to work around a HW issue - * where we enable the pipe while not in idle link-training mode. + * Until TGL on PORT_A we can have only eDP in SST mode. There the only + * reason we need to set idle transmission mode is to work around a HW + * issue where we enable the pipe while not in idle link-training mode. * In this case there is requirement to wait for a minimum number of * idle patterns to be sent. */ - if (port == PORT_A) + if (port == PORT_A && INTEL_GEN(dev_priv) < 12) return; - if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port), + if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, DP_TP_STATUS_IDLE_DONE, 1)) DRM_ERROR("Timed out waiting for DP idle patterns\n"); } @@ -4396,9 +4487,10 @@ intel_dp_configure_mst(struct intel_dp *intel_dp) &dp_to_dig_port(intel_dp)->base; bool sink_can_mst = intel_dp_sink_can_mst(intel_dp); - DRM_DEBUG_KMS("MST support? 
port %c: %s, sink: %s, modparam: %s\n", - port_name(encoder->port), yesno(intel_dp->can_mst), - yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst)); + DRM_DEBUG_KMS("[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n", + encoder->base.base.id, encoder->base.name, + yesno(intel_dp->can_mst), yesno(sink_can_mst), + yesno(i915_modparams.enable_dp_mst)); if (!intel_dp->can_mst) return; @@ -4418,9 +4510,36 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) DP_DPRX_ESI_LEN; } +bool +intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + /* + * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication + * of Color Encoding Format and Content Color Gamut], in order to + * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP. + */ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + return true; + + switch (conn_state->colorspace) { + case DRM_MODE_COLORIMETRY_SYCC_601: + case DRM_MODE_COLORIMETRY_OPYCC_601: + case DRM_MODE_COLORIMETRY_BT2020_YCC: + case DRM_MODE_COLORIMETRY_BT2020_RGB: + case DRM_MODE_COLORIMETRY_BT2020_CYCC: + return true; + default: + break; + } + + return false; +} + static void -intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) +intel_dp_setup_vsc_sdp(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); struct dp_sdp vsc_sdp = {}; @@ -4441,13 +4560,55 @@ intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp, */ vsc_sdp.sdp_header.HB3 = 0x13; - /* - * YCbCr 420 = 3h DB16[7:4] ITU-R BT.601 = 0h, ITU-R BT.709 = 1h - * DB16[3:0] DP 1.4a spec, Table 2-120 - */ - vsc_sdp.db[16] = 0x3 << 4; /* 0x3 << 4 , YCbCr 420*/ - /* RGB->YCBCR color conversion uses the BT.709 color space. */ - vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */ + /* DP 1.4a spec, Table 2-120 */ + switch (crtc_state->output_format) { + case INTEL_OUTPUT_FORMAT_YCBCR444: + vsc_sdp.db[16] = 0x1 << 4; /* YCbCr 444 : DB16[7:4] = 1h */ + break; + case INTEL_OUTPUT_FORMAT_YCBCR420: + vsc_sdp.db[16] = 0x3 << 4; /* YCbCr 420 : DB16[7:4] = 3h */ + break; + case INTEL_OUTPUT_FORMAT_RGB: + default: + /* RGB: DB16[7:4] = 0h */ + break; + } + + switch (conn_state->colorspace) { + case DRM_MODE_COLORIMETRY_BT709_YCC: + vsc_sdp.db[16] |= 0x1; + break; + case DRM_MODE_COLORIMETRY_XVYCC_601: + vsc_sdp.db[16] |= 0x2; + break; + case DRM_MODE_COLORIMETRY_XVYCC_709: + vsc_sdp.db[16] |= 0x3; + break; + case DRM_MODE_COLORIMETRY_SYCC_601: + vsc_sdp.db[16] |= 0x4; + break; + case DRM_MODE_COLORIMETRY_OPYCC_601: + vsc_sdp.db[16] |= 0x5; + break; + case DRM_MODE_COLORIMETRY_BT2020_CYCC: + case DRM_MODE_COLORIMETRY_BT2020_RGB: + vsc_sdp.db[16] |= 0x6; + break; + case DRM_MODE_COLORIMETRY_BT2020_YCC: + vsc_sdp.db[16] |= 0x7; + break; + case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65: + case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER: + vsc_sdp.db[16] |= 0x4; /* DCI-P3 (SMPTE RP 431-2) */ + break; + default: + /* sRGB (IEC 61966-2-1) / ITU-R BT.601: DB16[0:3] = 0h */ + + /* RGB->YCBCR color conversion uses the BT.709 color space. 
*/ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + vsc_sdp.db[16] |= 0x1; /* 0x1, ITU-R BT.709 */ + break; + } /* * For pixel encoding formats YCbCr444, YCbCr422, YCbCr420, and Y Only, @@ -4499,13 +4660,106 @@ intel_pixel_encoding_setup_vsc(struct intel_dp *intel_dp, crtc_state, DP_SDP_VSC, &vsc_sdp, sizeof(vsc_sdp)); } -void intel_dp_ycbcr_420_enable(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) +static void +intel_dp_setup_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) { - if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420) + struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); + struct dp_sdp infoframe_sdp = {}; + struct hdmi_drm_infoframe drm_infoframe = {}; + const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE; + unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE]; + ssize_t len; + int ret; + + ret = drm_hdmi_infoframe_set_hdr_metadata(&drm_infoframe, conn_state); + if (ret) { + DRM_DEBUG_KMS("couldn't set HDR metadata in infoframe\n"); return; + } - intel_pixel_encoding_setup_vsc(intel_dp, crtc_state); + len = hdmi_drm_infoframe_pack_only(&drm_infoframe, buf, sizeof(buf)); + if (len < 0) { + DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); + return; + } + + if (len != infoframe_size) { + DRM_DEBUG_KMS("wrong static hdr metadata size\n"); + return; + } + + /* + * Set up the infoframe sdp packet for HDR static metadata. + * Prepare VSC Header for SU as per DP 1.4a spec, + * Table 2-100 and Table 2-101 + */ + + /* Packet ID, 00h for non-Audio INFOFRAME */ + infoframe_sdp.sdp_header.HB0 = 0; + /* + * Packet Type 80h + Non-audio INFOFRAME Type value + * HDMI_INFOFRAME_TYPE_DRM: 0x87, + */ + infoframe_sdp.sdp_header.HB1 = drm_infoframe.type; + /* + * Least Significant Eight Bits of (Data Byte Count – 1) + * infoframe_size - 1, + */ + infoframe_sdp.sdp_header.HB2 = 0x1D; + /* INFOFRAME SDP Version Number */ + infoframe_sdp.sdp_header.HB3 = (0x13 << 2); + /* CTA Header Byte 2 (INFOFRAME Version Number) */ + infoframe_sdp.db[0] = drm_infoframe.version; + /* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */ + infoframe_sdp.db[1] = drm_infoframe.length; + /* + * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after + * HDMI_INFOFRAME_HEADER_SIZE + */ + BUILD_BUG_ON(sizeof(infoframe_sdp.db) < HDMI_DRM_INFOFRAME_SIZE + 2); + memcpy(&infoframe_sdp.db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE], + HDMI_DRM_INFOFRAME_SIZE); + + /* + * Size of DP infoframe sdp packet for HDR static metadata is consist of + * - DP SDP Header(struct dp_sdp_header): 4 bytes + * - Two Data Blocks: 2 bytes + * CTA Header Byte2 (INFOFRAME Version Number) + * CTA Header Byte3 (Length of INFOFRAME) + * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes + * + * Prior to GEN11's GMP register size is identical to DP HDR static metadata + * infoframe size. But GEN11+ has larger than that size, write_infoframe + * will pad rest of the size. 
+ */ + intel_dig_port->write_infoframe(&intel_dig_port->base, crtc_state, + HDMI_PACKET_TYPE_GAMUT_METADATA, + &infoframe_sdp, + sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE); +} + +void intel_dp_vsc_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state)) + return; + + intel_dp_setup_vsc_sdp(intel_dp, crtc_state, conn_state); +} + +void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + if (!conn_state->hdr_output_metadata) + return; + + intel_dp_setup_hdr_metadata_infoframe_sdp(intel_dp, + crtc_state, + conn_state); } static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp) @@ -5227,6 +5481,9 @@ static bool icl_combo_port_connected(struct drm_i915_private *dev_priv, { enum port port = intel_dig_port->base.port; + if (HAS_PCH_MCC(dev_priv) && port == PORT_C) + return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1); + return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port); } @@ -5506,7 +5763,6 @@ static int intel_dp_connector_register(struct drm_connector *connector) { struct intel_dp *intel_dp = intel_attached_dp(connector); - struct drm_device *dev = connector->dev; int ret; ret = intel_connector_register(connector); @@ -5521,8 +5777,7 @@ intel_dp_connector_register(struct drm_connector *connector) intel_dp->aux.dev = connector->kdev; ret = drm_dp_aux_register(&intel_dp->aux); if (!ret) - drm_dp_cec_register_connector(&intel_dp->aux, - connector->name, dev->dev); + drm_dp_cec_register_connector(&intel_dp->aux, connector); return ret; } @@ -6280,13 +6535,15 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd) * would end up in an endless cycle of * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..." */ - DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n", - port_name(intel_dig_port->base.port)); + DRM_DEBUG_KMS("ignoring long hpd on eDP [ENCODER:%d:%s]\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); return IRQ_HANDLED; } - DRM_DEBUG_KMS("got hpd irq on port %c - %s\n", - port_name(intel_dig_port->base.port), + DRM_DEBUG_KMS("got hpd irq on [ENCODER:%d:%s] - %s\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name, long_hpd ? 
"long" : "short"); if (long_hpd) { @@ -6353,6 +6610,13 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect else if (INTEL_GEN(dev_priv) >= 5) drm_connector_attach_max_bpc_property(connector, 6, 12); + intel_attach_colorspace_property(connector); + + if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11) + drm_object_attach_property(&connector->base, + connector->dev->mode_config.hdr_output_metadata_property, + 0); + if (intel_dp_is_edp(intel_dp)) { u32 allowed_scalers; @@ -7150,8 +7414,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_dp_modeset_retry_work_fn); if (WARN(intel_dig_port->max_lanes < 1, - "Not enough lanes (%d) for DP on port %c\n", - intel_dig_port->max_lanes, port_name(port))) + "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n", + intel_dig_port->max_lanes, intel_encoder->base.base.id, + intel_encoder->base.name)) return false; intel_dp_set_source_rates(intel_dp); @@ -7192,9 +7457,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, port != PORT_B && port != PORT_C)) return false; - DRM_DEBUG_KMS("Adding %s connector on port %c\n", - type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", - port_name(port)); + DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n", + type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP", + intel_encoder->base.base.id, intel_encoder->base.name); drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); @@ -7218,11 +7483,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port, intel_connector->get_hw_state = intel_connector_get_hw_state; /* init MST on ports that can support it */ - if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) && - (port == PORT_B || port == PORT_C || - port == PORT_D || port == PORT_F)) - intel_dp_mst_encoder_init(intel_dig_port, - intel_connector->base.base.id); + intel_dp_mst_encoder_init(intel_dig_port, + intel_connector->base.base.id); if (!intel_edp_init_connector(intel_dp, intel_connector)) { intel_dp_aux_fini(intel_dp); @@ -7313,11 +7575,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv, intel_encoder->power_domain = intel_port_to_power_domain(port); if (IS_CHERRYVIEW(dev_priv)) { if (port == PORT_D) - intel_encoder->crtc_mask = 1 << 2; + intel_encoder->pipe_mask = BIT(PIPE_C); else - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B); } else { - intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + intel_encoder->pipe_mask = ~0; } intel_encoder->cloneable = 0; intel_encoder->port = port; @@ -7378,7 +7640,8 @@ void intel_dp_mst_resume(struct drm_i915_private *dev_priv) if (!intel_dp->can_mst) continue; - ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr); + ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr, + true); if (ret) { intel_dp->is_mst = false; drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 00981fb9414b..3da166054788 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -13,6 +13,7 @@ #include "i915_reg.h" enum pipe; +enum port; struct drm_connector_state; struct drm_encoder; struct drm_i915_private; @@ -107,6 +108,14 @@ bool intel_dp_read_dpcd(struct intel_dp *intel_dp); bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp); int intel_dp_link_required(int pixel_clock, int bpp); int 
intel_dp_max_data_rate(int max_link_clock, int max_lanes); +bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); +void intel_dp_vsc_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); +void intel_dp_hdr_metadata_enable(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state); bool intel_digital_port_connected(struct intel_encoder *encoder); static inline unsigned int intel_dp_unused_lane_mask(int lane_count) diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 600873c796d0..03d1cba0b696 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -215,7 +215,7 @@ static void intel_mst_disable_dp(struct intel_encoder *encoder, ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); if (ret) { - DRM_ERROR("failed to update payload %d\n", ret); + DRM_DEBUG_KMS("failed to update payload %d\n", ret); } if (old_crtc_state->has_audio) intel_audio_codec_disable(encoder, @@ -295,7 +295,6 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = intel_dig_port->base.port; struct intel_connector *connector = to_intel_connector(conn_state->connector); int ret; @@ -326,12 +325,14 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder, DRM_ERROR("failed to allocate vcpi\n"); intel_dp->active_mst_links++; - temp = I915_READ(DP_TP_STATUS(port)); - I915_WRITE(DP_TP_STATUS(port), temp); + temp = I915_READ(intel_dp->regs.dp_tp_status); + I915_WRITE(intel_dp->regs.dp_tp_status, temp); ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr); intel_ddi_enable_pipe_clock(pipe_config); + + intel_ddi_set_dp_msa(pipe_config, conn_state); } static void intel_mst_enable_dp(struct intel_encoder *encoder, @@ -342,11 +343,10 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder, struct intel_digital_port *intel_dig_port = intel_mst->primary; struct intel_dp *intel_dp = &intel_dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum port port = intel_dig_port->base.port; DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links); - if (intel_de_wait_for_set(dev_priv, DP_TP_STATUS(port), + if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status, DP_TP_STATUS_ACT_SENT, 1)) DRM_ERROR("Timed out waiting for ACT sent\n"); @@ -393,20 +393,7 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector) return ret; } -static enum drm_connector_status -intel_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct intel_connector *intel_connector = to_intel_connector(connector); - struct intel_dp *intel_dp = intel_connector->mst_port; - - if (drm_connector_is_unregistered(connector)) - return connector_status_disconnected; - return drm_dp_mst_detect_port(connector, &intel_dp->mst_mgr, - intel_connector->port); -} - static const struct drm_connector_funcs intel_dp_mst_connector_funcs = { - .detect = intel_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .atomic_get_property = intel_digital_connector_atomic_get_property, .atomic_set_property = intel_digital_connector_atomic_set_property, @@ -426,6 +413,7 @@ static 
enum drm_mode_status intel_dp_mst_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); struct intel_dp *intel_dp = intel_connector->mst_port; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; @@ -453,7 +441,7 @@ intel_dp_mst_mode_valid(struct drm_connector *connector, if (mode_rate > max_rate || mode->clock > max_dotclk) return MODE_CLOCK_HIGH; - return MODE_OK; + return intel_mode_valid_max_plane_size(dev_priv, mode); } static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector, @@ -466,11 +454,26 @@ static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *c return &intel_dp->mst_encoders[crtc->pipe]->base.base; } +static int +intel_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) +{ + struct intel_connector *intel_connector = to_intel_connector(connector); + struct intel_dp *intel_dp = intel_connector->mst_port; + + if (drm_connector_is_unregistered(connector)) + return connector_status_disconnected; + + return drm_dp_mst_detect_port(connector, ctx, &intel_dp->mst_mgr, + intel_connector->port); +} + static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { .get_modes = intel_dp_mst_get_modes, .mode_valid = intel_dp_mst_mode_valid, .atomic_best_encoder = intel_mst_atomic_best_encoder, .atomic_check = intel_dp_mst_atomic_check, + .detect_ctx = intel_dp_mst_detect, }; static void intel_dp_mst_encoder_destroy(struct drm_encoder *encoder) @@ -615,8 +618,16 @@ intel_dp_create_fake_mst_encoder(struct intel_digital_port *intel_dig_port, enum intel_encoder->type = INTEL_OUTPUT_DP_MST; intel_encoder->power_domain = intel_dig_port->base.power_domain; intel_encoder->port = intel_dig_port->base.port; - intel_encoder->crtc_mask = 0x7; intel_encoder->cloneable = 0; + /* + * This is wrong, but broken userspace uses the intersection + * of possible_crtcs of all the encoders of a given connector + * to figure out which crtcs can drive said connector. What + * should be used instead is the union of possible_crtcs. 
+ * To keep such userspace functioning we must misconfigure + * this to make sure the intersection is not empty :( + */ + intel_encoder->pipe_mask = ~0; intel_encoder->compute_config = intel_dp_mst_compute_config; intel_encoder->disable = intel_mst_disable_dp; @@ -653,21 +664,31 @@ intel_dp_mst_encoder_active_links(struct intel_digital_port *intel_dig_port) int intel_dp_mst_encoder_init(struct intel_digital_port *intel_dig_port, int conn_base_id) { + struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev); struct intel_dp *intel_dp = &intel_dig_port->dp; - struct drm_device *dev = intel_dig_port->base.base.dev; + enum port port = intel_dig_port->base.port; int ret; - intel_dp->can_mst = true; + if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp)) + return 0; + + if (INTEL_GEN(i915) < 12 && port == PORT_A) + return 0; + + if (INTEL_GEN(i915) < 11 && port == PORT_E) + return 0; + intel_dp->mst_mgr.cbs = &mst_cbs; /* create encoders */ intel_dp_create_fake_mst_encoders(intel_dig_port); - ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, dev, + ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm, &intel_dp->aux, 16, 3, conn_base_id); - if (ret) { - intel_dp->can_mst = false; + if (ret) return ret; - } + + intel_dp->can_mst = true; + return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index d5a298c3c83b..3ce0a023eee0 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -247,8 +247,7 @@ static struct intel_shared_dpll * intel_find_shared_dpll(struct intel_atomic_state *state, const struct intel_crtc *crtc, const struct intel_dpll_hw_state *pll_state, - enum intel_dpll_id range_min, - enum intel_dpll_id range_max) + unsigned long dpll_mask) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); struct intel_shared_dpll *pll, *unused_pll = NULL; @@ -257,7 +256,9 @@ intel_find_shared_dpll(struct intel_atomic_state *state, shared_dpll = intel_atomic_get_shared_dpll_state(&state->base); - for (i = range_min; i <= range_max; i++) { + WARN_ON(dpll_mask & ~(BIT(I915_NUM_PLLS) - 1)); + + for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) { pll = &dev_priv->shared_dplls[i]; /* Only want to check enabled timings first */ @@ -464,8 +465,8 @@ static bool ibx_get_dpll(struct intel_atomic_state *state, } else { pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, - DPLL_ID_PCH_PLL_A, - DPLL_ID_PCH_PLL_B); + BIT(DPLL_ID_PCH_PLL_B) | + BIT(DPLL_ID_PCH_PLL_A)); } if (!pll) @@ -829,7 +830,8 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state, pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, - DPLL_ID_WRPLL1, DPLL_ID_WRPLL2); + BIT(DPLL_ID_WRPLL2) | + BIT(DPLL_ID_WRPLL1)); if (!pll) return NULL; @@ -892,7 +894,7 @@ static bool hsw_get_dpll(struct intel_atomic_state *state, pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, - DPLL_ID_SPLL, DPLL_ID_SPLL); + BIT(DPLL_ID_SPLL)); } else { return false; } @@ -1462,13 +1464,13 @@ static bool skl_get_dpll(struct intel_atomic_state *state, if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP)) pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, - DPLL_ID_SKL_DPLL0, - DPLL_ID_SKL_DPLL0); + BIT(DPLL_ID_SKL_DPLL0)); else pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, - DPLL_ID_SKL_DPLL1, - DPLL_ID_SKL_DPLL3); + BIT(DPLL_ID_SKL_DPLL3) | + BIT(DPLL_ID_SKL_DPLL2) | + BIT(DPLL_ID_SKL_DPLL1)); if (!pll) return false; 
@@ -2416,8 +2418,9 @@ static bool cnl_get_dpll(struct intel_atomic_state *state, pll = intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state, - DPLL_ID_SKL_DPLL0, - DPLL_ID_SKL_DPLL2); + BIT(DPLL_ID_SKL_DPLL2) | + BIT(DPLL_ID_SKL_DPLL1) | + BIT(DPLL_ID_SKL_DPLL0)); if (!pll) { DRM_DEBUG_KMS("No PLL selected\n"); return false; @@ -2535,6 +2538,18 @@ static const struct skl_wrpll_params icl_tbt_pll_19_2MHz_values = { .pdiv = 0x4 /* 5 */, .kdiv = 1, .qdiv_mode = 0, .qdiv_ratio = 0, }; +static const struct skl_wrpll_params tgl_tbt_pll_19_2MHz_values = { + .dco_integer = 0x54, .dco_fraction = 0x3000, + /* the following params are unused */ + .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0, +}; + +static const struct skl_wrpll_params tgl_tbt_pll_24MHz_values = { + .dco_integer = 0x43, .dco_fraction = 0x4000, + /* the following params are unused */ + .pdiv = 0, .kdiv = 0, .qdiv_mode = 0, .qdiv_ratio = 0, +}; + static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state, struct skl_wrpll_params *pll_params) { @@ -2562,8 +2577,34 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state, { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); - *pll_params = dev_priv->cdclk.hw.ref == 24000 ? - icl_tbt_pll_24MHz_values : icl_tbt_pll_19_2MHz_values; + if (INTEL_GEN(dev_priv) >= 12) { + switch (dev_priv->cdclk.hw.ref) { + default: + MISSING_CASE(dev_priv->cdclk.hw.ref); + /* fall-through */ + case 19200: + case 38400: + *pll_params = tgl_tbt_pll_19_2MHz_values; + break; + case 24000: + *pll_params = tgl_tbt_pll_24MHz_values; + break; + } + } else { + switch (dev_priv->cdclk.hw.ref) { + default: + MISSING_CASE(dev_priv->cdclk.hw.ref); + /* fall-through */ + case 19200: + case 38400: + *pll_params = icl_tbt_pll_19_2MHz_values; + break; + case 24000: + *pll_params = icl_tbt_pll_24MHz_values; + break; + } + } + return true; } @@ -2622,7 +2663,8 @@ enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port) static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, u32 *target_dco_khz, - struct intel_dpll_hw_state *state) + struct intel_dpll_hw_state *state, + bool is_dkl) { u32 dco_min_freq, dco_max_freq; int div1_vals[] = {7, 5, 3, 2}; @@ -2644,8 +2686,13 @@ static bool icl_mg_pll_find_divisors(int clock_khz, bool is_dp, bool use_ssc, continue; if (div2 >= 2) { + /* + * Note: a_divratio not matching TGL BSpec + * algorithm but matching hardcoded values and + * working on HW for DP alt-mode at least + */ a_divratio = is_dp ? 10 : 5; - tlinedrv = 2; + tlinedrv = is_dkl ? 
1 : 2; } else { a_divratio = 5; tlinedrv = 0; @@ -2708,11 +2755,12 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, u64 tmp; bool use_ssc = false; bool is_dp = !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI); + bool is_dkl = INTEL_GEN(dev_priv) >= 12; memset(pll_state, 0, sizeof(*pll_state)); if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz, - pll_state)) { + pll_state, is_dkl)) { DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock); return false; } @@ -2720,8 +2768,11 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, m1div = 2; m2div_int = dco_khz / (refclk_khz * m1div); if (m2div_int > 255) { - m1div = 4; - m2div_int = dco_khz / (refclk_khz * m1div); + if (!is_dkl) { + m1div = 4; + m2div_int = dco_khz / (refclk_khz * m1div); + } + if (m2div_int > 255) { DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n", clock); @@ -2801,60 +2852,94 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state, } ssc_steplog = 4; - pll_state->mg_pll_div0 = (m2div_rem > 0 ? MG_PLL_DIV0_FRACNEN_H : 0) | - MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) | - MG_PLL_DIV0_FBDIV_INT(m2div_int); - - pll_state->mg_pll_div1 = MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) | - MG_PLL_DIV1_DITHER_DIV_2 | - MG_PLL_DIV1_NDIVRATIO(1) | - MG_PLL_DIV1_FBPREDIV(m1div); - - pll_state->mg_pll_lf = MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) | - MG_PLL_LF_AFCCNTSEL_512 | - MG_PLL_LF_GAINCTRL(1) | - MG_PLL_LF_INT_COEFF(int_coeff) | - MG_PLL_LF_PROP_COEFF(prop_coeff); - - pll_state->mg_pll_frac_lock = MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 | - MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 | - MG_PLL_FRAC_LOCK_LOCKTHRESH(10) | - MG_PLL_FRAC_LOCK_DCODITHEREN | - MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain); - if (use_ssc || m2div_rem > 0) - pll_state->mg_pll_frac_lock |= MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN; - - pll_state->mg_pll_ssc = (use_ssc ? MG_PLL_SSC_EN : 0) | - MG_PLL_SSC_TYPE(2) | - MG_PLL_SSC_STEPLENGTH(ssc_steplen) | - MG_PLL_SSC_STEPNUM(ssc_steplog) | - MG_PLL_SSC_FLLEN | - MG_PLL_SSC_STEPSIZE(ssc_stepsize); - - pll_state->mg_pll_tdc_coldst_bias = MG_PLL_TDC_COLDST_COLDSTART | - MG_PLL_TDC_COLDST_IREFINT_EN | - MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) | - MG_PLL_TDC_TDCOVCCORR_EN | - MG_PLL_TDC_TDCSEL(3); - - pll_state->mg_pll_bias = MG_PLL_BIAS_BIAS_GB_SEL(3) | - MG_PLL_BIAS_INIT_DCOAMP(0x3F) | - MG_PLL_BIAS_BIAS_BONUS(10) | - MG_PLL_BIAS_BIASCAL_EN | - MG_PLL_BIAS_CTRIM(12) | - MG_PLL_BIAS_VREF_RDAC(4) | - MG_PLL_BIAS_IREFTRIM(iref_trim); - - if (refclk_khz == 38400) { - pll_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART; - pll_state->mg_pll_bias_mask = 0; + /* write pll_state calculations */ + if (is_dkl) { + pll_state->mg_pll_div0 = DKL_PLL_DIV0_INTEG_COEFF(int_coeff) | + DKL_PLL_DIV0_PROP_COEFF(prop_coeff) | + DKL_PLL_DIV0_FBPREDIV(m1div) | + DKL_PLL_DIV0_FBDIV_INT(m2div_int); + + pll_state->mg_pll_div1 = DKL_PLL_DIV1_IREF_TRIM(iref_trim) | + DKL_PLL_DIV1_TDC_TARGET_CNT(tdc_targetcnt); + + pll_state->mg_pll_ssc = DKL_PLL_SSC_IREF_NDIV_RATIO(iref_ndiv) | + DKL_PLL_SSC_STEP_LEN(ssc_steplen) | + DKL_PLL_SSC_STEP_NUM(ssc_steplog) | + (use_ssc ? DKL_PLL_SSC_EN : 0); + + pll_state->mg_pll_bias = (m2div_frac ? DKL_PLL_BIAS_FRAC_EN_H : 0) | + DKL_PLL_BIAS_FBDIV_FRAC(m2div_frac); + + pll_state->mg_pll_tdc_coldst_bias = + DKL_PLL_TDC_SSC_STEP_SIZE(ssc_stepsize) | + DKL_PLL_TDC_FEED_FWD_GAIN(feedfwgain); + } else { - pll_state->mg_pll_tdc_coldst_bias_mask = -1U; - pll_state->mg_pll_bias_mask = -1U; - } + pll_state->mg_pll_div0 = + (m2div_rem > 0 ? 
MG_PLL_DIV0_FRACNEN_H : 0) | + MG_PLL_DIV0_FBDIV_FRAC(m2div_frac) | + MG_PLL_DIV0_FBDIV_INT(m2div_int); + + pll_state->mg_pll_div1 = + MG_PLL_DIV1_IREF_NDIVRATIO(iref_ndiv) | + MG_PLL_DIV1_DITHER_DIV_2 | + MG_PLL_DIV1_NDIVRATIO(1) | + MG_PLL_DIV1_FBPREDIV(m1div); + + pll_state->mg_pll_lf = + MG_PLL_LF_TDCTARGETCNT(tdc_targetcnt) | + MG_PLL_LF_AFCCNTSEL_512 | + MG_PLL_LF_GAINCTRL(1) | + MG_PLL_LF_INT_COEFF(int_coeff) | + MG_PLL_LF_PROP_COEFF(prop_coeff); + + pll_state->mg_pll_frac_lock = + MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 | + MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 | + MG_PLL_FRAC_LOCK_LOCKTHRESH(10) | + MG_PLL_FRAC_LOCK_DCODITHEREN | + MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(feedfwgain); + if (use_ssc || m2div_rem > 0) + pll_state->mg_pll_frac_lock |= + MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN; + + pll_state->mg_pll_ssc = + (use_ssc ? MG_PLL_SSC_EN : 0) | + MG_PLL_SSC_TYPE(2) | + MG_PLL_SSC_STEPLENGTH(ssc_steplen) | + MG_PLL_SSC_STEPNUM(ssc_steplog) | + MG_PLL_SSC_FLLEN | + MG_PLL_SSC_STEPSIZE(ssc_stepsize); + + pll_state->mg_pll_tdc_coldst_bias = + MG_PLL_TDC_COLDST_COLDSTART | + MG_PLL_TDC_COLDST_IREFINT_EN | + MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(iref_pulse_w) | + MG_PLL_TDC_TDCOVCCORR_EN | + MG_PLL_TDC_TDCSEL(3); + + pll_state->mg_pll_bias = + MG_PLL_BIAS_BIAS_GB_SEL(3) | + MG_PLL_BIAS_INIT_DCOAMP(0x3F) | + MG_PLL_BIAS_BIAS_BONUS(10) | + MG_PLL_BIAS_BIASCAL_EN | + MG_PLL_BIAS_CTRIM(12) | + MG_PLL_BIAS_VREF_RDAC(4) | + MG_PLL_BIAS_IREFTRIM(iref_trim); + + if (refclk_khz == 38400) { + pll_state->mg_pll_tdc_coldst_bias_mask = + MG_PLL_TDC_COLDST_COLDSTART; + pll_state->mg_pll_bias_mask = 0; + } else { + pll_state->mg_pll_tdc_coldst_bias_mask = -1U; + pll_state->mg_pll_bias_mask = -1U; + } - pll_state->mg_pll_tdc_coldst_bias &= pll_state->mg_pll_tdc_coldst_bias_mask; - pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask; + pll_state->mg_pll_tdc_coldst_bias &= + pll_state->mg_pll_tdc_coldst_bias_mask; + pll_state->mg_pll_bias &= pll_state->mg_pll_bias_mask; + } return true; } @@ -2908,7 +2993,7 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT]; struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum port port = encoder->port; - bool has_dpll4 = false; + unsigned long dpll_mask; if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) { DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n"); @@ -2917,16 +3002,19 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state, } if (IS_ELKHARTLAKE(dev_priv) && port != PORT_A) - has_dpll4 = true; + dpll_mask = + BIT(DPLL_ID_EHL_DPLL4) | + BIT(DPLL_ID_ICL_DPLL1) | + BIT(DPLL_ID_ICL_DPLL0); + else + dpll_mask = BIT(DPLL_ID_ICL_DPLL1) | BIT(DPLL_ID_ICL_DPLL0); port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, - DPLL_ID_ICL_DPLL0, - has_dpll4 ? 
DPLL_ID_EHL_DPLL4 - : DPLL_ID_ICL_DPLL1); + dpll_mask); if (!port_dpll->pll) { - DRM_DEBUG_KMS("No combo PHY PLL found for port %c\n", - port_name(encoder->port)); + DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n", + encoder->base.base.id, encoder->base.name); return false; } @@ -2956,8 +3044,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, - DPLL_ID_ICL_TBTPLL, - DPLL_ID_ICL_TBTPLL); + BIT(DPLL_ID_ICL_TBTPLL)); if (!port_dpll->pll) { DRM_DEBUG_KMS("No TBT-ALT PLL found\n"); return false; @@ -2976,8 +3063,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state, encoder->port)); port_dpll->pll = intel_find_shared_dpll(state, crtc, &port_dpll->hw_state, - dpll_id, - dpll_id); + BIT(dpll_id)); if (!port_dpll->pll) { DRM_DEBUG_KMS("No MG PHY PLL found\n"); goto err_unreference_tbt_pll; @@ -3101,6 +3187,78 @@ out: return ret; } +static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state) +{ + const enum intel_dpll_id id = pll->info->id; + enum tc_port tc_port = icl_pll_id_to_tc_port(id); + intel_wakeref_t wakeref; + bool ret = false; + u32 val; + + wakeref = intel_display_power_get_if_enabled(dev_priv, + POWER_DOMAIN_DISPLAY_CORE); + if (!wakeref) + return false; + + val = I915_READ(MG_PLL_ENABLE(tc_port)); + if (!(val & PLL_ENABLE)) + goto out; + + /* + * All registers read here have the same HIP_INDEX_REG even though + * they are on different building blocks + */ + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2)); + + hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port)); + hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; + + hw_state->mg_clktop2_hsclkctl = + I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port)); + hw_state->mg_clktop2_hsclkctl &= + MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; + + hw_state->mg_clktop2_coreclkctl1 = + I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port)); + hw_state->mg_clktop2_coreclkctl1 &= + MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; + + hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port)); + hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK | + DKL_PLL_DIV0_PROP_COEFF_MASK | + DKL_PLL_DIV0_FBPREDIV_MASK | + DKL_PLL_DIV0_FBDIV_INT_MASK); + + hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port)); + hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK | + DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); + + hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port)); + hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | + DKL_PLL_SSC_STEP_LEN_MASK | + DKL_PLL_SSC_STEP_NUM_MASK | + DKL_PLL_SSC_EN); + + hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port)); + hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H | + DKL_PLL_BIAS_FBDIV_FRAC_MASK); + + hw_state->mg_pll_tdc_coldst_bias = + I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port)); + hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK | + DKL_PLL_TDC_FEED_FWD_GAIN_MASK); + + ret = true; +out: + intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref); + return ret; +} + static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, struct intel_dpll_hw_state *hw_state, @@ -3235,6 +3393,75 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv, POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port)); } +static 
void dkl_pll_write(struct drm_i915_private *dev_priv, + struct intel_shared_dpll *pll) +{ + struct intel_dpll_hw_state *hw_state = &pll->state.hw_state; + enum tc_port tc_port = icl_pll_id_to_tc_port(pll->info->id); + u32 val; + + /* + * All registers programmed here have the same HIP_INDEX_REG even + * though on different building block + */ + I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2)); + + /* All the registers are RMW */ + val = I915_READ(DKL_REFCLKIN_CTL(tc_port)); + val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; + val |= hw_state->mg_refclkin_ctl; + I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val); + + val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port)); + val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; + val |= hw_state->mg_clktop2_coreclkctl1; + I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val); + + val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port)); + val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | + MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | + MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | + MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); + val |= hw_state->mg_clktop2_hsclkctl; + I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val); + + val = I915_READ(DKL_PLL_DIV0(tc_port)); + val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK | + DKL_PLL_DIV0_PROP_COEFF_MASK | + DKL_PLL_DIV0_FBPREDIV_MASK | + DKL_PLL_DIV0_FBDIV_INT_MASK); + val |= hw_state->mg_pll_div0; + I915_WRITE(DKL_PLL_DIV0(tc_port), val); + + val = I915_READ(DKL_PLL_DIV1(tc_port)); + val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK | + DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); + val |= hw_state->mg_pll_div1; + I915_WRITE(DKL_PLL_DIV1(tc_port), val); + + val = I915_READ(DKL_PLL_SSC(tc_port)); + val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | + DKL_PLL_SSC_STEP_LEN_MASK | + DKL_PLL_SSC_STEP_NUM_MASK | + DKL_PLL_SSC_EN); + val |= hw_state->mg_pll_ssc; + I915_WRITE(DKL_PLL_SSC(tc_port), val); + + val = I915_READ(DKL_PLL_BIAS(tc_port)); + val &= ~(DKL_PLL_BIAS_FRAC_EN_H | + DKL_PLL_BIAS_FBDIV_FRAC_MASK); + val |= hw_state->mg_pll_bias; + I915_WRITE(DKL_PLL_BIAS(tc_port), val); + + val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port)); + val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK | + DKL_PLL_TDC_FEED_FWD_GAIN_MASK); + val |= hw_state->mg_pll_tdc_coldst_bias; + I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val); + + POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port)); +} + static void icl_pll_power_enable(struct drm_i915_private *dev_priv, struct intel_shared_dpll *pll, i915_reg_t enable_reg) @@ -3327,7 +3554,10 @@ static void mg_pll_enable(struct drm_i915_private *dev_priv, icl_pll_power_enable(dev_priv, pll, enable_reg); - icl_mg_pll_write(dev_priv, pll); + if (INTEL_GEN(dev_priv) >= 12) + dkl_pll_write(dev_priv, pll); + else + icl_mg_pll_write(dev_priv, pll); /* * DVFS pre sequence would be here, but in our driver the cdclk code @@ -3482,11 +3712,22 @@ static const struct intel_dpll_mgr ehl_pll_mgr = { .dump_hw_state = icl_dump_hw_state, }; +static const struct intel_shared_dpll_funcs dkl_pll_funcs = { + .enable = mg_pll_enable, + .disable = mg_pll_disable, + .get_hw_state = dkl_pll_get_hw_state, +}; + static const struct dpll_info tgl_plls[] = { { "DPLL 0", &combo_pll_funcs, DPLL_ID_ICL_DPLL0, 0 }, { "DPLL 1", &combo_pll_funcs, DPLL_ID_ICL_DPLL1, 0 }, { "TBT PLL", &tbt_pll_funcs, DPLL_ID_ICL_TBTPLL, 0 }, - /* TODO: Add typeC plls */ + { "TC PLL 1", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL1, 0 }, + { "TC PLL 2", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL2, 0 }, + { "TC PLL 3", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL3, 0 }, + { "TC PLL 4", &dkl_pll_funcs, DPLL_ID_ICL_MGPLL4, 0 }, + { "TC PLL 5", &dkl_pll_funcs, 
DPLL_ID_TGL_MGPLL5, 0 }, + { "TC PLL 6", &dkl_pll_funcs, DPLL_ID_TGL_MGPLL6, 0 }, { }, }; @@ -3494,6 +3735,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = { .dpll_info = tgl_plls, .get_dplls = icl_get_dplls, .put_dplls = icl_put_dplls, + .update_active_dpll = icl_update_active_dpll, .dump_hw_state = icl_dump_hw_state, }; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index 104cf6d42333..2a104c64291d 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -337,6 +337,11 @@ struct intel_shared_dpll { * @info: platform specific info */ const struct dpll_info *info; + + /** + * @wakeref: In some platforms a device-level runtime pm reference may + * need to be grabbed to disable DC states while this DPLL is enabled + */ intel_wakeref_t wakeref; }; diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c new file mode 100644 index 000000000000..bb5a0e91b370 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + * + */ + +#include "i915_drv.h" +#include "intel_display_types.h" + +#define DSB_BUF_SIZE (2 * PAGE_SIZE) + +/** + * DOC: DSB + * + * A DSB (Display State Buffer) is a queue of MMIO instructions in the memory + * which can be offloaded to DSB HW in Display Controller. DSB HW is a DMA + * engine that can be programmed to download the DSB from memory. + * It allows driver to batch submit display HW programming. This helps to + * reduce loading time and CPU activity, thereby making the context switch + * faster. DSB Support added from Gen12 Intel graphics based platform. + * + * DSB's can access only the pipe, plane, and transcoder Data Island Packet + * registers. + * + * DSB HW can support only register writes (both indexed and direct MMIO + * writes). There are no registers reads possible with DSB HW engine. + */ + +/* DSB opcodes. 
*/ +#define DSB_OPCODE_SHIFT 24 +#define DSB_OPCODE_MMIO_WRITE 0x1 +#define DSB_OPCODE_INDEXED_WRITE 0x9 +#define DSB_BYTE_EN 0xF +#define DSB_BYTE_EN_SHIFT 20 +#define DSB_REG_VALUE_MASK 0xfffff + +static inline bool is_dsb_busy(struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id)); +} + +static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 dsb_ctrl; + + dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id)); + if (DSB_STATUS & dsb_ctrl) { + DRM_DEBUG_KMS("DSB engine is busy.\n"); + return false; + } + + dsb_ctrl |= DSB_ENABLE; + I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl); + + POSTING_READ(DSB_CTRL(pipe, dsb->id)); + return true; +} + +static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + u32 dsb_ctrl; + + dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id)); + if (DSB_STATUS & dsb_ctrl) { + DRM_DEBUG_KMS("DSB engine is busy.\n"); + return false; + } + + dsb_ctrl &= ~DSB_ENABLE; + I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl); + + POSTING_READ(DSB_CTRL(pipe, dsb->id)); + return true; +} + +/** + * intel_dsb_get() - Allocate DSB context and return a DSB instance. + * @crtc: intel_crtc structure to get pipe info. + * + * This function provides handle of a DSB instance, for the further DSB + * operations. + * + * Returns: address of Intel_dsb instance requested for. + * Failure: Returns the same DSB instance, but without a command buffer. + */ + +struct intel_dsb * +intel_dsb_get(struct intel_crtc *crtc) +{ + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *i915 = to_i915(dev); + struct intel_dsb *dsb = &crtc->dsb; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + intel_wakeref_t wakeref; + + if (!HAS_DSB(i915)) + return dsb; + + if (atomic_add_return(1, &dsb->refcount) != 1) + return dsb; + + dsb->id = DSB1; + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE); + if (IS_ERR(obj)) { + DRM_ERROR("Gem object creation failed\n"); + goto err; + } + + vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) { + DRM_ERROR("Vma creation failed\n"); + i915_gem_object_put(obj); + atomic_dec(&dsb->refcount); + goto err; + } + + dsb->cmd_buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC); + if (IS_ERR(dsb->cmd_buf)) { + DRM_ERROR("Command buffer creation failed\n"); + i915_vma_unpin_and_release(&vma, 0); + dsb->cmd_buf = NULL; + atomic_dec(&dsb->refcount); + goto err; + } + dsb->vma = vma; + +err: + intel_runtime_pm_put(&i915->runtime_pm, wakeref); + return dsb; +} + +/** + * intel_dsb_put() - To destroy DSB context. + * @dsb: intel_dsb structure. + * + * This function destroys the DSB context allocated by a dsb_get(), by + * unpinning and releasing the VMA object associated with it. 
+ */ + +void intel_dsb_put(struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_i915_private *i915 = to_i915(crtc->base.dev); + + if (!HAS_DSB(i915)) + return; + + if (WARN_ON(atomic_read(&dsb->refcount) == 0)) + return; + + if (atomic_dec_and_test(&dsb->refcount)) { + i915_vma_unpin_and_release(&dsb->vma, I915_VMA_RELEASE_MAP); + dsb->cmd_buf = NULL; + dsb->free_pos = 0; + dsb->ins_start_offset = 0; + } +} + +/** + * intel_dsb_indexed_reg_write() -Write to the DSB context for auto + * increment register. + * @dsb: intel_dsb structure. + * @reg: register address. + * @val: value. + * + * This function is used for writing register-value pair in command + * buffer of DSB for auto-increment register. During command buffer overflow, + * a warning is thrown and rest all erroneous condition register programming + * is done through mmio write. + */ + +void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg, + u32 val) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 *buf = dsb->cmd_buf; + u32 reg_val; + + if (!buf) { + I915_WRITE(reg, val); + return; + } + + if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) { + DRM_DEBUG_KMS("DSB buffer overflow\n"); + return; + } + + /* + * For example the buffer will look like below for 3 dwords for auto + * increment register: + * +--------------------------------------------------------+ + * | size = 3 | offset &| value1 | value2 | value3 | zero | + * | | opcode | | | | | + * +--------------------------------------------------------+ + * + + + + + + + + * 0 4 8 12 16 20 24 + * Byte + * + * As every instruction is 8 byte aligned the index of dsb instruction + * will start always from even number while dealing with u32 array. If + * we are writing odd no of dwords, Zeros will be added in the end for + * padding. + */ + reg_val = buf[dsb->ins_start_offset + 1] & DSB_REG_VALUE_MASK; + if (reg_val != i915_mmio_reg_offset(reg)) { + /* Every instruction should be 8 byte aligned. */ + dsb->free_pos = ALIGN(dsb->free_pos, 2); + + dsb->ins_start_offset = dsb->free_pos; + + /* Update the size. */ + buf[dsb->free_pos++] = 1; + + /* Update the opcode and reg. */ + buf[dsb->free_pos++] = (DSB_OPCODE_INDEXED_WRITE << + DSB_OPCODE_SHIFT) | + i915_mmio_reg_offset(reg); + + /* Update the value. */ + buf[dsb->free_pos++] = val; + } else { + /* Update the new value. */ + buf[dsb->free_pos++] = val; + + /* Update the size. */ + buf[dsb->ins_start_offset]++; + } + + /* if number of data words is odd, then the last dword should be 0.*/ + if (dsb->free_pos & 0x1) + buf[dsb->free_pos] = 0; +} + +/** + * intel_dsb_reg_write() -Write to the DSB context for normal + * register. + * @dsb: intel_dsb structure. + * @reg: register address. + * @val: value. + * + * This function is used for writing register-value pair in command + * buffer of DSB. During command buffer overflow, a warning is thrown + * and rest all erroneous condition register programming is done + * through mmio write. 
+ */ +void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + u32 *buf = dsb->cmd_buf; + + if (!buf) { + I915_WRITE(reg, val); + return; + } + + if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) { + DRM_DEBUG_KMS("DSB buffer overflow\n"); + return; + } + + dsb->ins_start_offset = dsb->free_pos; + buf[dsb->free_pos++] = val; + buf[dsb->free_pos++] = (DSB_OPCODE_MMIO_WRITE << DSB_OPCODE_SHIFT) | + (DSB_BYTE_EN << DSB_BYTE_EN_SHIFT) | + i915_mmio_reg_offset(reg); +} + +/** + * intel_dsb_commit() - Trigger workload execution of DSB. + * @dsb: intel_dsb structure. + * + * This function is used to do actual write to hardware using DSB. + * On errors, fall back to MMIO. Also this function help to reset the context. + */ +void intel_dsb_commit(struct intel_dsb *dsb) +{ + struct intel_crtc *crtc = container_of(dsb, typeof(*crtc), dsb); + struct drm_device *dev = crtc->base.dev; + struct drm_i915_private *dev_priv = to_i915(dev); + enum pipe pipe = crtc->pipe; + u32 tail; + + if (!dsb->free_pos) + return; + + if (!intel_dsb_enable_engine(dsb)) + goto reset; + + if (is_dsb_busy(dsb)) { + DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n"); + goto reset; + } + I915_WRITE(DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma)); + + tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES); + if (tail > dsb->free_pos * 4) + memset(&dsb->cmd_buf[dsb->free_pos], 0, + (tail - dsb->free_pos * 4)); + + if (is_dsb_busy(dsb)) { + DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n"); + goto reset; + } + DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n", + i915_ggtt_offset(dsb->vma), tail); + I915_WRITE(DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail); + if (wait_for(!is_dsb_busy(dsb), 1)) { + DRM_ERROR("Timed out waiting for DSB workload completion.\n"); + goto reset; + } + +reset: + dsb->free_pos = 0; + dsb->ins_start_offset = 0; + intel_dsb_disable_engine(dsb); +} diff --git a/drivers/gpu/drm/i915/display/intel_dsb.h b/drivers/gpu/drm/i915/display/intel_dsb.h new file mode 100644 index 000000000000..6f95c8e909e6 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dsb.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef _INTEL_DSB_H +#define _INTEL_DSB_H + +#include <linux/types.h> + +#include "i915_reg.h" + +struct intel_crtc; +struct i915_vma; + +enum dsb_id { + INVALID_DSB = -1, + DSB1, + DSB2, + DSB3, + MAX_DSB_PER_PIPE +}; + +struct intel_dsb { + atomic_t refcount; + enum dsb_id id; + u32 *cmd_buf; + struct i915_vma *vma; + + /* + * free_pos will point the first free entry position + * and help in calculating tail of command buffer. + */ + int free_pos; + + /* + * ins_start_offset will help to store start address of the dsb + * instuction and help in identifying the batch of auto-increment + * register. 
+ */ + u32 ins_start_offset; +}; + +struct intel_dsb * +intel_dsb_get(struct intel_crtc *crtc); +void intel_dsb_put(struct intel_dsb *dsb); +void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val); +void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg, + u32 val); +void intel_dsb_commit(struct intel_dsb *dsb); + +#endif diff --git a/drivers/gpu/drm/i915/display/intel_dsi.c b/drivers/gpu/drm/i915/display/intel_dsi.c index 5fec02aceaed..a2a937109a5a 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi.c +++ b/drivers/gpu/drm/i915/display/intel_dsi.c @@ -55,6 +55,7 @@ int intel_dsi_get_modes(struct drm_connector *connector) enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct drm_i915_private *dev_priv = to_i915(connector->dev); struct intel_connector *intel_connector = to_intel_connector(connector); const struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode; int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; @@ -73,7 +74,7 @@ enum drm_mode_status intel_dsi_mode_valid(struct drm_connector *connector, return MODE_CLOCK_HIGH; } - return MODE_OK; + return intel_mode_valid_max_plane_size(dev_priv, mode); } struct intel_dsi_host *intel_dsi_host_init(struct intel_dsi *intel_dsi, diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c index 93baf366692e..bcfbcb743e7d 100644 --- a/drivers/gpu/drm/i915/display/intel_dvo.c +++ b/drivers/gpu/drm/i915/display/intel_dvo.c @@ -280,7 +280,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder, struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; struct intel_dvo *intel_dvo = enc_to_dvo(encoder); - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; u32 dvo_val; i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg; i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg; @@ -505,7 +505,7 @@ void intel_dvo_init(struct drm_i915_private *dev_priv) intel_encoder->type = INTEL_OUTPUT_DVO; intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER; intel_encoder->port = port; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->pipe_mask = ~0; switch (dvo->type) { case INTEL_DVO_CHIP_TMDS: diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 16ed44bfd734..3111ecaeabd0 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -343,8 +343,8 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv) HSW_FBCQ_DIS); } - if (IS_GEN(dev_priv, 11)) - /* Wa_1409120013:icl,ehl */ + if (INTEL_GEN(dev_priv) >= 11) + /* Wa_1409120013:icl,ehl,tgl */ I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL); I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); @@ -1320,6 +1320,9 @@ void intel_fbc_init(struct drm_i915_private *dev_priv) fbc->enabled = false; fbc->active = false; + if (!drm_mm_initialized(&dev_priv->mm.stolen)) + mkwrite_device_info(dev_priv)->display.has_fbc = false; + if (need_fbc_vtd_wa(dev_priv)) mkwrite_device_info(dev_priv)->display.has_fbc = false; diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c index b5c588e511dd..48c960ca12fb 100644 --- a/drivers/gpu/drm/i915/display/intel_fbdev.c +++ b/drivers/gpu/drm/i915/display/intel_fbdev.c @@ -141,10 +141,10 @@ static int intelfb_alloc(struct drm_fb_helper *helper, /* If the FB is 
too big, just don't use it since fbdev is not very * important and we should probably use that space with FBC or other * features. */ - obj = NULL; + obj = ERR_PTR(-ENODEV); if (size * 2 < dev_priv->stolen_usable_size) obj = i915_gem_object_create_stolen(dev_priv, size); - if (obj == NULL) + if (IS_ERR(obj)) obj = i915_gem_object_create_shmem(dev_priv, size); if (IS_ERR(obj)) { DRM_ERROR("failed to allocate framebuffer\n"); @@ -204,7 +204,6 @@ static int intelfb_create(struct drm_fb_helper *helper, sizes->fb_height = intel_fb->base.height; } - mutex_lock(&dev->struct_mutex); wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); /* Pin the GGTT vma for our access via info->screen_base. @@ -267,7 +266,6 @@ static int intelfb_create(struct drm_fb_helper *helper, ifbdev->vma_flags = flags; intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(pdev, info); return 0; @@ -275,7 +273,6 @@ out_unpin: intel_unpin_fb_vma(vma, flags); out_unlock: intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - mutex_unlock(&dev->struct_mutex); return ret; } @@ -292,11 +289,8 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) drm_fb_helper_fini(&ifbdev->helper); - if (ifbdev->vma) { - mutex_lock(&ifbdev->helper.dev->struct_mutex); + if (ifbdev->vma) intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags); - mutex_unlock(&ifbdev->helper.dev->struct_mutex); - } if (ifbdev->fb) drm_framebuffer_remove(&ifbdev->fb->base); @@ -445,7 +439,7 @@ int intel_fbdev_init(struct drm_device *dev) struct intel_fbdev *ifbdev; int ret; - if (WARN_ON(!HAS_DISPLAY(dev_priv))) + if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv))) return -ENODEV; ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL); diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c index 719379774fa5..84b164f31895 100644 --- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c @@ -206,6 +206,7 @@ static int frontbuffer_active(struct i915_active *ref) return 0; } +__i915_active_call static void frontbuffer_retire(struct i915_active *ref) { struct intel_frontbuffer *front = @@ -220,11 +221,18 @@ static void frontbuffer_release(struct kref *ref) { struct intel_frontbuffer *front = container_of(ref, typeof(*front), ref); + struct drm_i915_gem_object *obj = front->obj; + struct i915_vma *vma; - front->obj->frontbuffer = NULL; - spin_unlock(&to_i915(front->obj->base.dev)->fb_tracking.lock); + spin_lock(&obj->vma.lock); + for_each_ggtt_vma(vma, obj) + vma->display_alignment = I915_GTT_MIN_ALIGNMENT; + spin_unlock(&obj->vma.lock); - i915_gem_object_put(front->obj); + obj->frontbuffer = NULL; + spin_unlock(&to_i915(obj->base.dev)->fb_tracking.lock); + + i915_gem_object_put(obj); kfree(front); } @@ -249,8 +257,9 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj) front->obj = obj; kref_init(&front->ref); atomic_set(&front->bits, 0); - i915_active_init(i915, &front->write, - frontbuffer_active, frontbuffer_retire); + i915_active_init(&front->write, + frontbuffer_active, + i915_active_may_sleep(frontbuffer_retire)); spin_lock(&i915->fb_tracking.lock); if (obj->frontbuffer) { diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index d6775a005726..3d4d19ac1d14 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -836,7 +836,7 @@ int intel_gmbus_setup(struct 
drm_i915_private *dev_priv) unsigned int pin; int ret; - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) return 0; if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 6ec5ceeab601..f1f41ca8402b 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -1,9 +1,11 @@ /* SPDX-License-Identifier: MIT */ /* * Copyright (C) 2017 Google, Inc. + * Copyright © 2017-2019, Intel Corporation. * * Authors: * Sean Paul <seanpaul@chromium.org> + * Ramalingam C <ramalingam.c@intel.com> */ #include <linux/component.h> @@ -18,6 +20,7 @@ #include "intel_display_types.h" #include "intel_hdcp.h" #include "intel_sideband.h" +#include "intel_connector.h" #define KEY_LOAD_TRIES 5 #define ENCRYPT_STATUS_CHANGE_TIMEOUT_MS 50 @@ -105,24 +108,20 @@ bool intel_hdcp2_capable(struct intel_connector *connector) return capable; } -static inline bool intel_hdcp_in_use(struct intel_connector *connector) +static inline +bool intel_hdcp_in_use(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, enum port port) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - enum port port = connector->encoder->port; - u32 reg; - - reg = I915_READ(PORT_HDCP_STATUS(port)); - return reg & HDCP_STATUS_ENC; + return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) & + HDCP_STATUS_ENC; } -static inline bool intel_hdcp2_in_use(struct intel_connector *connector) +static inline +bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, enum port port) { - struct drm_i915_private *dev_priv = to_i915(connector->base.dev); - enum port port = connector->encoder->port; - u32 reg; - - reg = I915_READ(HDCP2_STATUS_DDI(port)); - return reg & LINK_ENCRYPTION_STATUS; + return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + LINK_ENCRYPTION_STATUS; } static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port, @@ -253,9 +252,29 @@ static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text) } static -u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port) +u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv, + enum transcoder cpu_transcoder, enum port port) { - enum port port = intel_dig_port->base.port; + if (INTEL_GEN(dev_priv) >= 12) { + switch (cpu_transcoder) { + case TRANSCODER_A: + return HDCP_TRANSA_REP_PRESENT | + HDCP_TRANSA_SHA1_M0; + case TRANSCODER_B: + return HDCP_TRANSB_REP_PRESENT | + HDCP_TRANSB_SHA1_M0; + case TRANSCODER_C: + return HDCP_TRANSC_REP_PRESENT | + HDCP_TRANSC_SHA1_M0; + case TRANSCODER_D: + return HDCP_TRANSD_REP_PRESENT | + HDCP_TRANSD_SHA1_M0; + default: + DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder); + return -EINVAL; + } + } + switch (port) { case PORT_A: return HDCP_DDIA_REP_PRESENT | HDCP_DDIA_SHA1_M0; @@ -268,18 +287,20 @@ u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port) case PORT_E: return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0; default: - break; + DRM_ERROR("Unknown port %d\n", port); + return -EINVAL; } - DRM_ERROR("Unknown port %d\n", port); - return -EINVAL; } static -int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, +int intel_hdcp_validate_v_prime(struct intel_connector *connector, const struct intel_hdcp_shim *shim, u8 *ksv_fifo, u8 num_downstream, u8 *bstatus) { + struct intel_digital_port
*intel_dig_port = conn_to_dig_port(connector); struct drm_i915_private *dev_priv; + enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; + enum port port = intel_dig_port->base.port; u32 vprime, sha_text, sha_leftovers, rep_ctl; int ret, i, j, sha_idx; @@ -306,7 +327,7 @@ int intel_hdcp_validate_v_prime(struct intel_digital_port *intel_dig_port, sha_idx = 0; sha_text = 0; sha_leftovers = 0; - rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port); + rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port); I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32); for (i = 0; i < num_downstream; i++) { unsigned int sha_empty; @@ -548,7 +569,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector) * V prime atleast twice. */ for (i = 0; i < tries; i++) { - ret = intel_hdcp_validate_v_prime(intel_dig_port, shim, + ret = intel_hdcp_validate_v_prime(connector, shim, ksv_fifo, num_downstream, bstatus); if (!ret) @@ -576,6 +597,7 @@ static int intel_hdcp_auth(struct intel_connector *connector) struct drm_device *dev = connector->base.dev; const struct intel_hdcp_shim *shim = hdcp->shim; struct drm_i915_private *dev_priv; + enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; enum port port; unsigned long r0_prime_gen_start; int ret, i, tries = 2; @@ -615,18 +637,21 @@ static int intel_hdcp_auth(struct intel_connector *connector) /* Initialize An with 2 random values and acquire it */ for (i = 0; i < 2; i++) - I915_WRITE(PORT_HDCP_ANINIT(port), get_random_u32()); - I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_CAPTURE_AN); + I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port), + get_random_u32()); + I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), + HDCP_CONF_CAPTURE_AN); /* Wait for An to be acquired */ - if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port), + if (intel_de_wait_for_set(dev_priv, + HDCP_STATUS(dev_priv, cpu_transcoder, port), HDCP_STATUS_AN_READY, 1)) { DRM_ERROR("Timed out waiting for An\n"); return -ETIMEDOUT; } - an.reg[0] = I915_READ(PORT_HDCP_ANLO(port)); - an.reg[1] = I915_READ(PORT_HDCP_ANHI(port)); + an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port)); + an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port)); ret = shim->write_an_aksv(intel_dig_port, an.shim); if (ret) return ret; @@ -644,24 +669,26 @@ static int intel_hdcp_auth(struct intel_connector *connector) return -EPERM; } - I915_WRITE(PORT_HDCP_BKSVLO(port), bksv.reg[0]); - I915_WRITE(PORT_HDCP_BKSVHI(port), bksv.reg[1]); + I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]); + I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]); ret = shim->repeater_present(intel_dig_port, &repeater_present); if (ret) return ret; if (repeater_present) I915_WRITE(HDCP_REP_CTL, - intel_hdcp_get_repeater_ctl(intel_dig_port)); + intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, + port)); ret = shim->toggle_signalling(intel_dig_port, true); if (ret) return ret; - I915_WRITE(PORT_HDCP_CONF(port), HDCP_CONF_AUTH_AND_ENC); + I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), + HDCP_CONF_AUTH_AND_ENC); /* Wait for R0 ready */ - if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) & + if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { DRM_ERROR("Timed out waiting for R0 ready\n"); return -ETIMEDOUT; @@ -689,22 +716,25 @@ static int intel_hdcp_auth(struct intel_connector *connector) ret = shim->read_ri_prime(intel_dig_port, ri.shim); if (ret) return ret; - 
I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg); + I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ - if (!wait_for(I915_READ(PORT_HDCP_STATUS(port)) & + if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, + port)) & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) break; } if (i == tries) { DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n", - I915_READ(PORT_HDCP_STATUS(port))); + I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, + port))); return -ETIMEDOUT; } /* Wait for encryption confirmation */ - if (intel_de_wait_for_set(dev_priv, PORT_HDCP_STATUS(port), + if (intel_de_wait_for_set(dev_priv, + HDCP_STATUS(dev_priv, cpu_transcoder, port), HDCP_STATUS_ENC, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { DRM_ERROR("Timed out waiting for encryption\n"); @@ -729,15 +759,17 @@ static int _intel_hdcp_disable(struct intel_connector *connector) struct drm_i915_private *dev_priv = connector->base.dev->dev_private; struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); enum port port = intel_dig_port->base.port; + enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n", connector->base.name, connector->base.base.id); hdcp->hdcp_encrypted = false; - I915_WRITE(PORT_HDCP_CONF(port), 0); - if (intel_de_wait_for_clear(dev_priv, PORT_HDCP_STATUS(port), ~0, - ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { + I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0); + if (intel_de_wait_for_clear(dev_priv, + HDCP_STATUS(dev_priv, cpu_transcoder, port), + ~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) { DRM_ERROR("Failed to disable HDCP, timeout clearing status\n"); return -ETIMEDOUT; } @@ -808,9 +840,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector) struct drm_i915_private *dev_priv = connector->base.dev->dev_private; struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector); enum port port = intel_dig_port->base.port; + enum transcoder cpu_transcoder; int ret = 0; mutex_lock(&hdcp->mutex); + cpu_transcoder = hdcp->cpu_transcoder; /* Check_link valid only when HDCP1.4 is enabled */ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || @@ -819,10 +853,11 @@ static int intel_hdcp_check_link(struct intel_connector *connector) goto out; } - if (WARN_ON(!intel_hdcp_in_use(connector))) { + if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) { DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n", connector->base.name, connector->base.base.id, - I915_READ(PORT_HDCP_STATUS(port))); + I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, + port))); ret = -ENXIO; hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; schedule_work(&hdcp->prop_work); @@ -887,7 +922,7 @@ static void intel_hdcp_prop_work(struct work_struct *work) bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port) { /* PORT E doesn't have HDCP, and PORT F is disabled */ - return INTEL_GEN(dev_priv) >= 9 && port < PORT_E; + return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E; } static int @@ -1493,10 +1528,11 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = connector->encoder->port; + enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - WARN_ON(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS); - + WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + LINK_ENCRYPTION_STATUS); if 
(hdcp->shim->toggle_signalling) { ret = hdcp->shim->toggle_signalling(intel_dig_port, true); if (ret) { @@ -1506,14 +1542,18 @@ static int hdcp2_enable_encryption(struct intel_connector *connector) } } - if (I915_READ(HDCP2_STATUS_DDI(port)) & LINK_AUTH_STATUS) { + if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + LINK_AUTH_STATUS) { /* Link is Authenticated. Now set for Encryption */ - I915_WRITE(HDCP2_CTL_DDI(port), - I915_READ(HDCP2_CTL_DDI(port)) | + I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port), + I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, + port)) | CTL_LINK_ENCRYPTION_REQ); } - ret = intel_de_wait_for_set(dev_priv, HDCP2_STATUS_DDI(port), + ret = intel_de_wait_for_set(dev_priv, + HDCP2_STATUS(dev_priv, cpu_transcoder, + port), LINK_ENCRYPTION_STATUS, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); @@ -1526,14 +1566,19 @@ static int hdcp2_disable_encryption(struct intel_connector *connector) struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = connector->encoder->port; + enum transcoder cpu_transcoder = hdcp->cpu_transcoder; int ret; - WARN_ON(!(I915_READ(HDCP2_STATUS_DDI(port)) & LINK_ENCRYPTION_STATUS)); + WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) & + LINK_ENCRYPTION_STATUS)); - I915_WRITE(HDCP2_CTL_DDI(port), - I915_READ(HDCP2_CTL_DDI(port)) & ~CTL_LINK_ENCRYPTION_REQ); + I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port), + I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) & + ~CTL_LINK_ENCRYPTION_REQ); - ret = intel_de_wait_for_clear(dev_priv, HDCP2_STATUS_DDI(port), + ret = intel_de_wait_for_clear(dev_priv, + HDCP2_STATUS(dev_priv, cpu_transcoder, + port), LINK_ENCRYPTION_STATUS, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS); if (ret == -ETIMEDOUT) @@ -1632,9 +1677,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; enum port port = connector->encoder->port; + enum transcoder cpu_transcoder; int ret = 0; mutex_lock(&hdcp->mutex); + cpu_transcoder = hdcp->cpu_transcoder; /* hdcp2_check_link is expected only when HDCP2.2 is Enabled */ if (hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED || @@ -1643,9 +1690,10 @@ static int intel_hdcp2_check_link(struct intel_connector *connector) goto out; } - if (WARN_ON(!intel_hdcp2_in_use(connector))) { + if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) { DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n", - I915_READ(HDCP2_STATUS_DDI(port))); + I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, + port))); ret = -ENXIO; hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; schedule_work(&hdcp->prop_work); @@ -1749,13 +1797,71 @@ static const struct component_ops i915_hdcp_component_ops = { .unbind = i915_hdcp_component_unbind, }; +static inline +enum mei_fw_ddi intel_get_mei_fw_ddi_index(enum port port) +{ + switch (port) { + case PORT_A: + return MEI_DDI_A; + case PORT_B ... PORT_F: + return (enum mei_fw_ddi)port; + default: + return MEI_DDI_INVALID_PORT; + } +} + +static inline +enum mei_fw_tc intel_get_mei_fw_tc(enum transcoder cpu_transcoder) +{ + switch (cpu_transcoder) { + case TRANSCODER_A ... 
TRANSCODER_D: + return (enum mei_fw_tc)(cpu_transcoder | 0x10); + default: /* eDP, DSI TRANSCODERS are non HDCP capable */ + return MEI_INVALID_TRANSCODER; + } +} + +void intel_hdcp_transcoder_config(struct intel_connector *connector, + enum transcoder cpu_transcoder) +{ + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_hdcp *hdcp = &connector->hdcp; + + if (!hdcp->shim) + return; + + if (INTEL_GEN(dev_priv) >= 12) { + mutex_lock(&hdcp->mutex); + hdcp->cpu_transcoder = cpu_transcoder; + hdcp->port_data.fw_tc = intel_get_mei_fw_tc(cpu_transcoder); + mutex_unlock(&hdcp->mutex); + } +} + static inline int initialize_hdcp_port_data(struct intel_connector *connector, const struct intel_hdcp_shim *shim) { + struct drm_i915_private *dev_priv = to_i915(connector->base.dev); struct intel_hdcp *hdcp = &connector->hdcp; struct hdcp_port_data *data = &hdcp->port_data; - data->port = connector->encoder->port; + if (INTEL_GEN(dev_priv) < 12) + data->fw_ddi = + intel_get_mei_fw_ddi_index(connector->encoder->port); + else + /* + * As per ME FW API expectation, for GEN 12+, fw_ddi is filled + * with zero(INVALID PORT index). + */ + data->fw_ddi = MEI_DDI_INVALID_PORT; + + /* + * As associated transcoder is set and modified at modeset, here fw_tc + * is initialized to zero (invalid transcoder index). This will be + * retained for <Gen12 forever. + */ + data->fw_tc = MEI_INVALID_TRANSCODER; + data->port_type = (u8)HDCP_PORT_TYPE_INTEGRATED; data->protocol = (u8)shim->protocol; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h index 13555b054930..41c1053d9e38 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.h +++ b/drivers/gpu/drm/i915/display/intel_hdcp.h @@ -15,10 +15,14 @@ struct drm_connector_state; struct drm_i915_private; struct intel_connector; struct intel_hdcp_shim; +enum port; +enum transcoder; void intel_hdcp_atomic_check(struct drm_connector *connector, struct drm_connector_state *old_state, struct drm_connector_state *new_state); +void intel_hdcp_transcoder_config(struct intel_connector *connector, + enum transcoder cpu_transcoder); int intel_hdcp_init(struct intel_connector *connector, const struct intel_hdcp_shim *hdcp_shim); int intel_hdcp_enable(struct intel_connector *connector, u8 content_type); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index b030f7ae3302..f6f5312205c4 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -189,13 +189,19 @@ hsw_dip_data_reg(struct drm_i915_private *dev_priv, } } -static int hsw_dip_data_size(unsigned int type) +static int hsw_dip_data_size(struct drm_i915_private *dev_priv, + unsigned int type) { switch (type) { case DP_SDP_VSC: return VIDEO_DIP_VSC_DATA_SIZE; case DP_SDP_PPS: return VIDEO_DIP_PPS_DATA_SIZE; + case HDMI_PACKET_TYPE_GAMUT_METADATA: + if (INTEL_GEN(dev_priv) >= 11) + return VIDEO_DIP_GMP_DATA_SIZE; + else + return VIDEO_DIP_DATA_SIZE; default: return VIDEO_DIP_DATA_SIZE; } @@ -514,7 +520,9 @@ static void hsw_write_infoframe(struct intel_encoder *encoder, int i; u32 val = I915_READ(ctl_reg); - data_size = hsw_dip_data_size(type); + data_size = hsw_dip_data_size(dev_priv, type); + + WARN_ON(len > data_size); val &= ~hsw_infoframe_enable(type); I915_WRITE(ctl_reg, val); @@ -724,11 +732,20 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder, drm_hdmi_avi_infoframe_colorspace(frame, conn_state); - drm_hdmi_avi_infoframe_quant_range(frame, 
connector, - adjusted_mode, - crtc_state->limited_color_range ? - HDMI_QUANTIZATION_RANGE_LIMITED : - HDMI_QUANTIZATION_RANGE_FULL); + /* nonsense combination */ + WARN_ON(crtc_state->limited_color_range && + crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB); + + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) { + drm_hdmi_avi_infoframe_quant_range(frame, connector, + adjusted_mode, + crtc_state->limited_color_range ? + HDMI_QUANTIZATION_RANGE_LIMITED : + HDMI_QUANTIZATION_RANGE_FULL); + } else { + frame->quantization_range = HDMI_QUANTIZATION_RANGE_DEFAULT; + frame->ycc_quantization_range = HDMI_YCC_QUANTIZATION_RANGE_LIMITED; + } drm_hdmi_avi_infoframe_content_type(frame, conn_state); @@ -1491,7 +1508,10 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) { struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private; + struct intel_connector *connector = + intel_dig_port->hdmi.attached_connector; enum port port = intel_dig_port->base.port; + enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder; int ret; union { u32 reg; @@ -1502,39 +1522,30 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port) if (ret) return false; - I915_WRITE(PORT_HDCP_RPRIME(port), ri.reg); + I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ - if (wait_for(I915_READ(PORT_HDCP_STATUS(port)) & + if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) { DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n", - I915_READ(PORT_HDCP_STATUS(port))); + I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, + port))); return false; } return true; } -struct hdcp2_hdmi_msg_data { +struct hdcp2_hdmi_msg_timeout { u8 msg_id; - u32 timeout; - u32 timeout2; + u16 timeout; }; -static const struct hdcp2_hdmi_msg_data hdcp2_msg_data[] = { - { HDCP_2_2_AKE_INIT, 0, 0 }, - { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0 }, - { HDCP_2_2_AKE_NO_STORED_KM, 0, 0 }, - { HDCP_2_2_AKE_STORED_KM, 0, 0 }, - { HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS, - HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS }, - { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, 0 }, - { HDCP_2_2_LC_INIT, 0, 0 }, - { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0 }, - { HDCP_2_2_SKE_SEND_EKS, 0, 0 }, - { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0 }, - { HDCP_2_2_REP_SEND_ACK, 0, 0 }, - { HDCP_2_2_REP_STREAM_MANAGE, 0, 0 }, - { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0 }, +static const struct hdcp2_hdmi_msg_timeout hdcp2_msg_timeout[] = { + { HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, }, + { HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, }, + { HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, }, + { HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, }, + { HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, }, }; static @@ -1551,12 +1562,17 @@ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired) { int i; - for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++) - if (hdcp2_msg_data[i].msg_id == msg_id && - (msg_id != HDCP_2_2_AKE_SEND_HPRIME || is_paired)) - return hdcp2_msg_data[i].timeout; - else if (hdcp2_msg_data[i].msg_id == msg_id) - return hdcp2_msg_data[i].timeout2; + if (msg_id == HDCP_2_2_AKE_SEND_HPRIME) { + if (is_paired) + return HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS; + else + return HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS; + } + + for (i 
= 0; i < ARRAY_SIZE(hdcp2_msg_timeout); i++) { + if (hdcp2_msg_timeout[i].msg_id == msg_id) + return hdcp2_msg_timeout[i].timeout; + } return -EINVAL; } @@ -2184,8 +2200,10 @@ intel_hdmi_mode_valid(struct drm_connector *connector, status = hdmi_port_clock_valid(hdmi, clock * 5 / 4, true, force_dvi); } + if (status != MODE_OK) + return status; - return status; + return intel_mode_valid_max_plane_size(dev_priv, mode); } static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, @@ -2261,9 +2279,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, static bool intel_hdmi_ycbcr420_config(struct drm_connector *connector, - struct intel_crtc_state *config, - int *clock_12bpc, int *clock_10bpc, - int *clock_8bpc) + struct intel_crtc_state *config) { struct intel_crtc *intel_crtc = to_intel_crtc(config->base.crtc); @@ -2272,11 +2288,6 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, return false; } - /* YCBCR420 TMDS rate requirement is half the pixel clock */ - config->port_clock /= 2; - *clock_12bpc /= 2; - *clock_10bpc /= 2; - *clock_8bpc /= 2; config->output_format = INTEL_OUTPUT_FORMAT_YCBCR420; /* YCBCR 420 output conversion needs a scaler */ @@ -2291,6 +2302,104 @@ intel_hdmi_ycbcr420_config(struct drm_connector *connector, return true; } +static int intel_hdmi_port_clock(int clock, int bpc) +{ + /* + * Need to adjust the port link by: + * 1.5x for 12bpc + * 1.25x for 10bpc + */ + return clock * bpc / 8; +} + +static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + int clock, bool force_dvi) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + int bpc; + + for (bpc = 12; bpc >= 10; bpc -= 2) { + if (hdmi_deep_color_possible(crtc_state, bpc) && + hdmi_port_clock_valid(intel_hdmi, + intel_hdmi_port_clock(clock, bpc), + true, force_dvi) == MODE_OK) + return bpc; + } + + return 8; +} + +static int intel_hdmi_compute_clock(struct intel_encoder *encoder, + struct intel_crtc_state *crtc_state, + bool force_dvi) +{ + struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + int bpc, clock = adjusted_mode->crtc_clock; + + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) + clock *= 2; + + /* YCBCR420 TMDS rate requirement is half the pixel clock */ + if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420) + clock /= 2; + + bpc = intel_hdmi_compute_bpc(encoder, crtc_state, + clock, force_dvi); + + crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc); + + /* + * pipe_bpp could already be below 8bpc due to + * FDI bandwidth constraints. We shouldn't bump it + * back up to 8bpc in that case. 
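The deep color rework above reduces the old hand-unrolled 12/10/8 bpc checks to one rule: the TMDS port clock is pixel clock * bpc / 8 (1.25x for 10bpc, 1.5x for 12bpc), computed after the double-clock doubling and the YCbCr 4:2:0 halving. A standalone user-space sketch of the selection loop, where MAX_TMDS_CLOCK_KHZ is a made-up sink limit standing in for hdmi_port_clock_valid():

#include <stdio.h>

/* Made-up sink limit standing in for hdmi_port_clock_valid(). */
#define MAX_TMDS_CLOCK_KHZ 340000

static int port_clock(int pixel_clock, int bpc)
{
	/* 8bpc -> 1.0x, 10bpc -> 1.25x, 12bpc -> 1.5x the pixel clock */
	return pixel_clock * bpc / 8;
}

static int compute_bpc(int pixel_clock)
{
	int bpc;

	/* prefer 12bpc, then 10bpc, fall back to 8bpc */
	for (bpc = 12; bpc >= 10; bpc -= 2)
		if (port_clock(pixel_clock, bpc) <= MAX_TMDS_CLOCK_KHZ)
			return bpc;
	return 8;
}

int main(void)
{
	int clock = 297000;	/* kHz; assumed mode, already DBLCLK/4:2:0 adjusted */
	int bpc = compute_bpc(clock);

	printf("bpc=%d, port clock=%d kHz\n", bpc, port_clock(clock, bpc));
	return 0;
}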
+ */ + if (crtc_state->pipe_bpp > bpc * 3) + crtc_state->pipe_bpp = bpc * 3; + + DRM_DEBUG_KMS("picking %d bpc for HDMI output (pipe bpp: %d)\n", + bpc, crtc_state->pipe_bpp); + + if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock, + false, force_dvi) != MODE_OK) { + DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n", + crtc_state->port_clock); + return -EINVAL; + } + + return 0; +} + +static bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, + const struct drm_connector_state *conn_state) +{ + const struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + const struct drm_display_mode *adjusted_mode = + &crtc_state->base.adjusted_mode; + + /* + * Our YCbCr output is always limited range. + * crtc_state->limited_color_range only applies to RGB, + * and it must never be set for YCbCr or we risk setting + * some conflicting bits in PIPECONF which will mess up + * the colors on the monitor. + */ + if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) + return false; + + if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { + /* See CEA-861-E - 5.1 Default Encoding Parameters */ + return crtc_state->has_hdmi_sink && + drm_default_rgb_quant_range(adjusted_mode) == + HDMI_QUANTIZATION_RANGE_LIMITED; + } else { + return intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; + } +} + int intel_hdmi_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -2302,11 +2411,8 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, struct drm_scdc *scdc = &connector->display_info.hdmi.scdc; struct intel_digital_connector_state *intel_conn_state = to_intel_digital_connector_state(conn_state); - int clock_8bpc = pipe_config->base.adjusted_mode.crtc_clock; - int clock_10bpc = clock_8bpc * 5 / 4; - int clock_12bpc = clock_8bpc * 3 / 2; - int desired_bpp; bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI; + int ret; if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN) return -EINVAL; @@ -2317,33 +2423,19 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (pipe_config->has_hdmi_sink) pipe_config->has_infoframe = true; - if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { - /* See CEA-861-E - 5.1 Default Encoding Parameters */ - pipe_config->limited_color_range = - pipe_config->has_hdmi_sink && - drm_default_rgb_quant_range(adjusted_mode) == - HDMI_QUANTIZATION_RANGE_LIMITED; - } else { - pipe_config->limited_color_range = - intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_LIMITED; - } - - if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) { + if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) pipe_config->pixel_multiplier = 2; - clock_8bpc *= 2; - clock_10bpc *= 2; - clock_12bpc *= 2; - } if (drm_mode_is_420_only(&connector->display_info, adjusted_mode)) { - if (!intel_hdmi_ycbcr420_config(connector, pipe_config, - &clock_12bpc, &clock_10bpc, - &clock_8bpc)) { + if (!intel_hdmi_ycbcr420_config(connector, pipe_config)) { DRM_ERROR("Can't support YCBCR420 output\n"); return -EINVAL; } } + pipe_config->limited_color_range = + intel_hdmi_limited_color_range(pipe_config, conn_state); + if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv)) pipe_config->has_pch_encoder = true; @@ -2355,43 +2447,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, intel_conn_state->force_audio == HDMI_AUDIO_ON; } - /* - * Note that g4x/vlv don't support 12bpc hdmi outputs. 
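intel_hdmi_limited_color_range() above captures the CEA-861-E rule: YCbCr output is always limited range, so the RGB limited-range flag must stay clear for it, and for RGB with the broadcast-RGB property on auto the mode's default quantization range decides. A small sketch of that decision, where default_rgb_quant_range() is a crude stub (CEA modes other than 640x480 default to limited) standing in for drm_default_rgb_quant_range():

#include <stdbool.h>
#include <stdio.h>

enum output_format { FORMAT_RGB, FORMAT_YCBCR444, FORMAT_YCBCR420 };
enum broadcast_rgb { RGB_AUTO, RGB_FULL, RGB_LIMITED };
enum quant_range { RANGE_FULL, RANGE_LIMITED };

/* Stub: CEA/CTA-861 modes default to limited range, except 640x480. */
static enum quant_range default_rgb_quant_range(bool is_cea_mode, int hdisplay)
{
	return (is_cea_mode && hdisplay != 640) ? RANGE_LIMITED : RANGE_FULL;
}

static bool limited_color_range(enum output_format fmt, enum broadcast_rgb prop,
				bool has_hdmi_sink, bool is_cea_mode, int hdisplay)
{
	/* YCbCr output is always limited range; never set the RGB flag for it. */
	if (fmt != FORMAT_RGB)
		return false;

	if (prop == RGB_AUTO)
		return has_hdmi_sink &&
		       default_rgb_quant_range(is_cea_mode, hdisplay) == RANGE_LIMITED;

	return prop == RGB_LIMITED;
}

int main(void)
{
	printf("1080p RGB auto on HDMI sink: %d\n",
	       limited_color_range(FORMAT_RGB, RGB_AUTO, true, true, 1920));
	printf("4:2:0 output:                %d\n",
	       limited_color_range(FORMAT_YCBCR420, RGB_AUTO, true, true, 3840));
	return 0;
}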
We also need - * to check that the higher clock still fits within limits. - */ - if (hdmi_deep_color_possible(pipe_config, 12) && - hdmi_port_clock_valid(intel_hdmi, clock_12bpc, - true, force_dvi) == MODE_OK) { - DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); - desired_bpp = 12*3; - - /* Need to adjust the port link by 1.5x for 12bpc. */ - pipe_config->port_clock = clock_12bpc; - } else if (hdmi_deep_color_possible(pipe_config, 10) && - hdmi_port_clock_valid(intel_hdmi, clock_10bpc, - true, force_dvi) == MODE_OK) { - DRM_DEBUG_KMS("picking bpc to 10 for HDMI output\n"); - desired_bpp = 10 * 3; - - /* Need to adjust the port link by 1.25x for 10bpc. */ - pipe_config->port_clock = clock_10bpc; - } else { - DRM_DEBUG_KMS("picking bpc to 8 for HDMI output\n"); - desired_bpp = 8*3; - - pipe_config->port_clock = clock_8bpc; - } - - if (!pipe_config->bw_constrained) { - DRM_DEBUG_KMS("forcing pipe bpp to %i for HDMI\n", desired_bpp); - pipe_config->pipe_bpp = desired_bpp; - } - - if (hdmi_port_clock_valid(intel_hdmi, pipe_config->port_clock, - false, force_dvi) != MODE_OK) { - DRM_DEBUG_KMS("unsupported HDMI clock, rejecting mode\n"); - return -EINVAL; - } + ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi); + if (ret) + return ret; /* Set user selected PAR to incoming mode's member */ adjusted_mode->picture_aspect_ratio = conn_state->picture_aspect_ratio; @@ -2431,6 +2489,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, return -EINVAL; } + intel_hdcp_transcoder_config(intel_hdmi->attached_connector, + pipe_config->cpu_transcoder); + return 0; } @@ -2757,8 +2818,9 @@ intel_hdmi_connector_register(struct drm_connector *connector) static void intel_hdmi_destroy(struct drm_connector *connector) { - if (intel_attached_hdmi(connector)->cec_notifier) - cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier); + struct cec_notifier *n = intel_attached_hdmi(connector)->cec_notifier; + + cec_notifier_conn_unregister(n); intel_connector_destroy(connector); } @@ -3007,7 +3069,7 @@ static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv, if (HAS_PCH_MCC(dev_priv)) ddc_pin = mcc_port_to_ddc_pin(dev_priv, port); - else if (HAS_PCH_TGP(dev_priv) || HAS_PCH_ICP(dev_priv)) + else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) ddc_pin = icl_port_to_ddc_pin(dev_priv, port); else if (HAS_PCH_CNP(dev_priv)) ddc_pin = cnp_port_to_ddc_pin(dev_priv, port); @@ -3073,13 +3135,15 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, struct drm_device *dev = intel_encoder->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); enum port port = intel_encoder->port; + struct cec_connector_info conn_info; - DRM_DEBUG_KMS("Adding HDMI connector on port %c\n", - port_name(port)); + DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n", + intel_encoder->base.base.id, intel_encoder->base.name); if (WARN(intel_dig_port->max_lanes < 4, - "Not enough lanes (%d) for HDMI on port %c\n", - intel_dig_port->max_lanes, port_name(port))) + "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n", + intel_dig_port->max_lanes, intel_encoder->base.base.id, + intel_encoder->base.name)) return; drm_connector_init(dev, connector, &intel_hdmi_connector_funcs, @@ -3125,8 +3189,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port, I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); } - intel_hdmi->cec_notifier = cec_notifier_get_conn(dev->dev, - port_identifier(port)); + cec_fill_conn_info_from_drm(&conn_info, connector); + + 
intel_hdmi->cec_notifier = + cec_notifier_conn_register(dev->dev, port_identifier(port), + &conn_info); if (!intel_hdmi->cec_notifier) DRM_DEBUG_KMS("CEC notifier get failed\n"); } @@ -3216,11 +3283,11 @@ void intel_hdmi_init(struct drm_i915_private *dev_priv, intel_encoder->port = port; if (IS_CHERRYVIEW(dev_priv)) { if (port == PORT_D) - intel_encoder->crtc_mask = 1 << 2; + intel_encoder->pipe_mask = BIT(PIPE_C); else - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B); } else { - intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + intel_encoder->pipe_mask = ~0; } intel_encoder->cloneable = 1 << INTEL_OUTPUT_ANALOG; /* diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 106c2e0bc3c9..cf1ea5427639 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -23,6 +23,7 @@ struct intel_crtc_state; struct intel_hdmi; struct drm_connector_state; union hdmi_infoframe; +enum port; void intel_hdmi_init(struct drm_i915_private *dev_priv, i915_reg_t hdmi_reg, enum port port); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 56be20f6f47e..fc29046d48ea 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -481,7 +481,8 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, long_hpd = long_mask & BIT(pin); - DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", port_name(port), + DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n", + encoder->base.base.id, encoder->base.name, long_hpd ? "long" : "short"); queue_dig = true; diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h index b0cd447b7fbc..087b5f57b321 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.h +++ b/drivers/gpu/drm/i915/display/intel_hotplug.h @@ -13,6 +13,7 @@ struct drm_i915_private; struct intel_connector; struct intel_encoder; +enum port; void intel_hpd_poll_init(struct drm_i915_private *dev_priv); enum intel_hotplug_state intel_encoder_hotplug(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index b19800b58442..0b67f7887cd0 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -114,7 +114,7 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv) pinfo.size_data = sizeof(*pdata); pinfo.dma_mask = DMA_BIT_MASK(32); - pdata->num_pipes = INTEL_INFO(dev_priv)->num_pipes; + pdata->num_pipes = INTEL_NUM_PIPES(dev_priv); pdata->num_ports = IS_CHERRYVIEW(dev_priv) ? 
3 : 2; /* B,C,D or B,C */ pdata->port[0].pipe = -1; pdata->port[1].pipe = -1; diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index b7c459a8931c..b1bc78623647 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -232,7 +232,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc); const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; - int pipe = crtc->pipe; + enum pipe pipe = crtc->pipe; u32 temp; if (HAS_PCH_SPLIT(dev_priv)) { @@ -899,12 +899,10 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER; intel_encoder->port = PORT_NONE; intel_encoder->cloneable = 0; - if (HAS_PCH_SPLIT(dev_priv)) - intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); - else if (IS_GEN(dev_priv, 4)) - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + if (INTEL_GEN(dev_priv) < 4) + intel_encoder->pipe_mask = BIT(PIPE_B); else - intel_encoder->crtc_mask = (1 << 1); + intel_encoder->pipe_mask = ~0; drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); connector->display_info.subpixel_order = SubPixelHorizontalRGB; diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 29edfc343716..848ce07a8ec2 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -30,6 +30,7 @@ #include <drm/i915_drm.h> #include "gem/i915_gem_pm.h" +#include "gt/intel_ring.h" #include "i915_drv.h" #include "i915_reg.h" @@ -230,7 +231,7 @@ alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *)) if (IS_ERR(rq)) return rq; - err = i915_active_ref(&overlay->last_flip, rq->timeline, rq); + err = i915_active_add_request(&overlay->last_flip, rq); if (err) { i915_request_add(rq); return ERR_PTR(err); @@ -439,8 +440,6 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay) struct i915_request *rq; u32 *cs; - lockdep_assert_held(&dev_priv->drm.struct_mutex); - /* * Only wait if there is actually an old frame to release to * guarantee forward progress. 
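Several hunks in this series (intel_dvo, intel_hdmi, intel_lvds, intel_sdvo) convert the open-coded crtc_mask literals to pipe_mask values built from pipe enums. The two encodings are numerically identical; a small sketch, assuming the usual PIPE_A..PIPE_C numbering of 0..2 and a local BIT() macro rather than the real i915 headers:

#include <assert.h>
#include <stdio.h>

enum pipe { PIPE_A, PIPE_B, PIPE_C };	/* assumed 0, 1, 2 as in i915 */
#define BIT(n) (1u << (n))

int main(void)
{
	/* old style: (1 << 0) | (1 << 1); new style: BIT(PIPE_A) | BIT(PIPE_B) */
	unsigned int old_mask = (1 << 0) | (1 << 1);
	unsigned int new_mask = BIT(PIPE_A) | BIT(PIPE_B);

	assert(old_mask == new_mask);

	/* ~0 effectively means "any pipe"; it gets masked against the pipes
	 * that actually exist on the platform. */
	printf("mask=0x%x, all=0x%x\n", new_mask, ~0u);
	return 0;
}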
@@ -751,7 +750,6 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, struct i915_vma *vma; int ret, tmp_width; - lockdep_assert_held(&dev_priv->drm.struct_mutex); WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); ret = intel_overlay_release_old_vid(overlay); @@ -852,7 +850,6 @@ int intel_overlay_switch_off(struct intel_overlay *overlay) struct drm_i915_private *dev_priv = overlay->i915; int ret; - lockdep_assert_held(&dev_priv->drm.struct_mutex); WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex)); ret = intel_overlay_recover_from_interrupt(overlay); @@ -1068,11 +1065,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, if (!(params->flags & I915_OVERLAY_ENABLE)) { drm_modeset_lock_all(dev); - mutex_lock(&dev->struct_mutex); - ret = intel_overlay_switch_off(overlay); - - mutex_unlock(&dev->struct_mutex); drm_modeset_unlock_all(dev); return ret; @@ -1088,7 +1081,6 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, return -ENOENT; drm_modeset_lock_all(dev); - mutex_lock(&dev->struct_mutex); if (i915_gem_object_is_tiled(new_bo)) { DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n"); @@ -1152,14 +1144,12 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data, if (ret != 0) goto out_unlock; - mutex_unlock(&dev->struct_mutex); drm_modeset_unlock_all(dev); i915_gem_object_put(new_bo); return 0; out_unlock: - mutex_unlock(&dev->struct_mutex); drm_modeset_unlock_all(dev); i915_gem_object_put(new_bo); @@ -1233,7 +1223,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, } drm_modeset_lock_all(dev); - mutex_lock(&dev->struct_mutex); ret = -EINVAL; if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) { @@ -1290,7 +1279,6 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data, ret = 0; out_unlock: - mutex_unlock(&dev->struct_mutex); drm_modeset_unlock_all(dev); return ret; @@ -1303,15 +1291,11 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys) struct i915_vma *vma; int err; - mutex_lock(&i915->drm.struct_mutex); - obj = i915_gem_object_create_stolen(i915, PAGE_SIZE); - if (obj == NULL) + if (IS_ERR(obj)) obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto err_unlock; - } + if (IS_ERR(obj)) + return PTR_ERR(obj); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) { @@ -1332,13 +1316,10 @@ static int get_registers(struct intel_overlay *overlay, bool use_phys) } overlay->reg_bo = obj; - mutex_unlock(&i915->drm.struct_mutex); return 0; err_put_bo: i915_gem_object_put(obj); -err_unlock: - mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -1367,8 +1348,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv) overlay->contrast = 75; overlay->saturation = 146; - i915_active_init(dev_priv, - &overlay->last_flip, + i915_active_init(&overlay->last_flip, NULL, intel_overlay_last_flip_retire); ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv)); diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 3bfb720560c2..6a9f322d3fca 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -76,7 +76,7 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, const struct intel_crtc_state *crtc_state) { /* Cannot enable DSC and PSR2 simultaneously */ - WARN_ON(crtc_state->dsc_params.compression_enable && + 
WARN_ON(crtc_state->dsc.compression_enable && crtc_state->has_psr2); switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) { @@ -88,48 +88,35 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv, } } -static int edp_psr_shift(enum transcoder cpu_transcoder) +static void psr_irq_control(struct drm_i915_private *dev_priv) { - switch (cpu_transcoder) { - case TRANSCODER_A: - return EDP_PSR_TRANSCODER_A_SHIFT; - case TRANSCODER_B: - return EDP_PSR_TRANSCODER_B_SHIFT; - case TRANSCODER_C: - return EDP_PSR_TRANSCODER_C_SHIFT; - default: - MISSING_CASE(cpu_transcoder); - /* fallthrough */ - case TRANSCODER_EDP: - return EDP_PSR_TRANSCODER_EDP_SHIFT; - } -} - -void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug) -{ - u32 debug_mask, mask; - enum transcoder cpu_transcoder; - u32 transcoders = BIT(TRANSCODER_EDP); + enum transcoder trans_shift; + u32 mask, val; + i915_reg_t imr_reg; - if (INTEL_GEN(dev_priv) >= 8) - transcoders |= BIT(TRANSCODER_A) | - BIT(TRANSCODER_B) | - BIT(TRANSCODER_C); - - debug_mask = 0; - mask = 0; - for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { - int shift = edp_psr_shift(cpu_transcoder); - - mask |= EDP_PSR_ERROR(shift); - debug_mask |= EDP_PSR_POST_EXIT(shift) | - EDP_PSR_PRE_ENTRY(shift); + /* + * gen12+ has registers relative to transcoder and one per transcoder + * using the same bit definition: handle it as TRANSCODER_EDP to force + * 0 shift in bit definition + */ + if (INTEL_GEN(dev_priv) >= 12) { + trans_shift = 0; + imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder); + } else { + trans_shift = dev_priv->psr.transcoder; + imr_reg = EDP_PSR_IMR; } - if (debug & I915_PSR_DEBUG_IRQ) - mask |= debug_mask; + mask = EDP_PSR_ERROR(trans_shift); + if (dev_priv->psr.debug & I915_PSR_DEBUG_IRQ) + mask |= EDP_PSR_POST_EXIT(trans_shift) | + EDP_PSR_PRE_ENTRY(trans_shift); - I915_WRITE(EDP_PSR_IMR, ~mask); + /* Warning: it is masking/setting reserved bits too */ + val = I915_READ(imr_reg); + val &= ~EDP_PSR_TRANS_MASK(trans_shift); + val |= ~mask; + I915_WRITE(imr_reg, val); } static void psr_event_print(u32 val, bool psr2_enabled) @@ -171,60 +158,58 @@ static void psr_event_print(u32 val, bool psr2_enabled) void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir) { - u32 transcoders = BIT(TRANSCODER_EDP); - enum transcoder cpu_transcoder; + enum transcoder cpu_transcoder = dev_priv->psr.transcoder; + enum transcoder trans_shift; + i915_reg_t imr_reg; ktime_t time_ns = ktime_get(); - u32 mask = 0; - if (INTEL_GEN(dev_priv) >= 8) - transcoders |= BIT(TRANSCODER_A) | - BIT(TRANSCODER_B) | - BIT(TRANSCODER_C); + if (INTEL_GEN(dev_priv) >= 12) { + trans_shift = 0; + imr_reg = TRANS_PSR_IMR(dev_priv->psr.transcoder); + } else { + trans_shift = dev_priv->psr.transcoder; + imr_reg = EDP_PSR_IMR; + } - for_each_cpu_transcoder_masked(dev_priv, cpu_transcoder, transcoders) { - int shift = edp_psr_shift(cpu_transcoder); + if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) { + dev_priv->psr.last_entry_attempt = time_ns; + DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n", + transcoder_name(cpu_transcoder)); + } - if (psr_iir & EDP_PSR_ERROR(shift)) { - DRM_WARN("[transcoder %s] PSR aux error\n", - transcoder_name(cpu_transcoder)); + if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) { + dev_priv->psr.last_exit = time_ns; + DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n", + transcoder_name(cpu_transcoder)); - dev_priv->psr.irq_aux_error = true; + if (INTEL_GEN(dev_priv) >= 9) { + u32 val = 
I915_READ(PSR_EVENT(cpu_transcoder)); + bool psr2_enabled = dev_priv->psr.psr2_enabled; - /* - * If this interruption is not masked it will keep - * interrupting so fast that it prevents the scheduled - * work to run. - * Also after a PSR error, we don't want to arm PSR - * again so we don't care about unmask the interruption - * or unset irq_aux_error. - */ - mask |= EDP_PSR_ERROR(shift); - } - - if (psr_iir & EDP_PSR_PRE_ENTRY(shift)) { - dev_priv->psr.last_entry_attempt = time_ns; - DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n", - transcoder_name(cpu_transcoder)); + I915_WRITE(PSR_EVENT(cpu_transcoder), val); + psr_event_print(val, psr2_enabled); } + } - if (psr_iir & EDP_PSR_POST_EXIT(shift)) { - dev_priv->psr.last_exit = time_ns; - DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n", - transcoder_name(cpu_transcoder)); + if (psr_iir & EDP_PSR_ERROR(trans_shift)) { + u32 val; - if (INTEL_GEN(dev_priv) >= 9) { - u32 val = I915_READ(PSR_EVENT(cpu_transcoder)); - bool psr2_enabled = dev_priv->psr.psr2_enabled; + DRM_WARN("[transcoder %s] PSR aux error\n", + transcoder_name(cpu_transcoder)); - I915_WRITE(PSR_EVENT(cpu_transcoder), val); - psr_event_print(val, psr2_enabled); - } - } - } + dev_priv->psr.irq_aux_error = true; - if (mask) { - mask |= I915_READ(EDP_PSR_IMR); - I915_WRITE(EDP_PSR_IMR, mask); + /* + * If this interruption is not masked it will keep + * interrupting so fast that it prevents the scheduled + * work to run. + * Also after a PSR error, we don't want to arm PSR + * again so we don't care about unmask the interruption + * or unset irq_aux_error. + */ + val = I915_READ(imr_reg); + val |= EDP_PSR_ERROR(trans_shift); + I915_WRITE(imr_reg, val); schedule_work(&dev_priv->psr.work); } @@ -283,6 +268,11 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) struct drm_i915_private *dev_priv = to_i915(dp_to_dig_port(intel_dp)->base.base.dev); + if (dev_priv->psr.dp) { + DRM_WARN("More than one eDP panel found, PSR support should be extended\n"); + return; + } + drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, sizeof(intel_dp->psr_dpcd)); @@ -305,7 +295,6 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp) dev_priv->psr.sink_sync_latency = intel_dp_get_sink_sync_latency(intel_dp); - WARN_ON(dev_priv->psr.dp); dev_priv->psr.dp = intel_dp; if (INTEL_GEN(dev_priv) >= 9 && @@ -390,7 +379,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) BUILD_BUG_ON(sizeof(aux_msg) > 20); for (i = 0; i < sizeof(aux_msg); i += 4) - I915_WRITE(EDP_PSR_AUX_DATA(i >> 2), + I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2), intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i)); aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0); @@ -401,7 +390,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp) /* Select only valid bits for SRD_AUX_CTL */ aux_ctl &= psr_aux_mask; - I915_WRITE(EDP_PSR_AUX_CTL, aux_ctl); + I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl); } static void intel_psr_enable_sink(struct intel_dp *intel_dp) @@ -491,8 +480,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) if (INTEL_GEN(dev_priv) >= 8) val |= EDP_PSR_CRC_ENABLE; - val |= I915_READ(EDP_PSR_CTL) & EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK; - I915_WRITE(EDP_PSR_CTL, val); + val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & + EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK); + I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val); } static void hsw_activate_psr2(struct intel_dp *intel_dp) @@ -528,9 +518,87 @@ static void 
hsw_activate_psr2(struct intel_dp *intel_dp) * PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec is * recommending keep this bit unset while PSR2 is enabled. */ - I915_WRITE(EDP_PSR_CTL, 0); + I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0); + + I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); +} - I915_WRITE(EDP_PSR2_CTL, val); +static bool +transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder trans) +{ + if (INTEL_GEN(dev_priv) < 9) + return false; + else if (INTEL_GEN(dev_priv) >= 12) + return trans == TRANSCODER_A; + else + return trans == TRANSCODER_EDP; +} + +static u32 intel_get_frame_time_us(const struct intel_crtc_state *cstate) +{ + if (!cstate || !cstate->base.active) + return 0; + + return DIV_ROUND_UP(1000 * 1000, + drm_mode_vrefresh(&cstate->base.adjusted_mode)); +} + +static void psr2_program_idle_frames(struct drm_i915_private *dev_priv, + u32 idle_frames) +{ + u32 val; + + idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT; + val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); + val &= ~EDP_PSR2_IDLE_FRAME_MASK; + val |= idle_frames; + I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); +} + +static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv) +{ + psr2_program_idle_frames(dev_priv, 0); + intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO); +} + +static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv) +{ + int idle_frames; + + intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6); + /* + * Restore PSR2 idle frame let's use 6 as the minimum to cover all known + * cases including the off-by-one issue that HW has in some cases. + */ + idle_frames = max(6, dev_priv->vbt.psr.idle_frames); + idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1); + psr2_program_idle_frames(dev_priv, idle_frames); +} + +static void tgl_dc5_idle_thread(struct work_struct *work) +{ + struct drm_i915_private *dev_priv = + container_of(work, typeof(*dev_priv), psr.idle_work.work); + + mutex_lock(&dev_priv->psr.lock); + /* If delayed work is pending, it is not idle */ + if (delayed_work_pending(&dev_priv->psr.idle_work)) + goto unlock; + + DRM_DEBUG_KMS("DC5/6 idle thread\n"); + tgl_psr2_disable_dc3co(dev_priv); +unlock: + mutex_unlock(&dev_priv->psr.lock); +} + +static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv) +{ + if (!dev_priv->psr.dc3co_enabled) + return; + + cancel_delayed_work(&dev_priv->psr.idle_work); + /* Before PSR2 exit disallow dc3co*/ + tgl_psr2_disable_dc3co(dev_priv); } static bool intel_psr2_config_valid(struct intel_dp *intel_dp, @@ -544,17 +612,26 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, if (!dev_priv->psr.sink_psr2_support) return false; + if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) { + DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n", + transcoder_name(crtc_state->cpu_transcoder)); + return false; + } + /* * DSC and PSR2 cannot be enabled simultaneously. If a requested * resolution requires DSC to be enabled, priority is given to DSC * over PSR2. 
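intel_get_frame_time_us() above is just 1,000,000 us divided by the mode's refresh rate, rounded up; tgl_dc3co_flush() further down arms the DC5/DC6 idle work only after six such frame times, matching the "required idle frames = 6" minimum used when restoring PSR2 idle frames. A standalone sketch of the arithmetic, assuming a 60 Hz panel:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int vrefresh = 60;	/* Hz, assumed panel refresh rate */
	unsigned int frame_time_us = DIV_ROUND_UP(1000 * 1000, vrefresh);
	unsigned int dc3co_delay_us = 6 * frame_time_us;	/* 6 idle frames */

	/* roughly 16667 us per frame, ~100 ms before DC3CO is re-armed */
	printf("frame time = %u us, idle-work delay = %u us\n",
	       frame_time_us, dc3co_delay_us);
	return 0;
}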
*/ - if (crtc_state->dsc_params.compression_enable) { + if (crtc_state->dsc.compression_enable) { DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n"); return false; } - if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 12) { + psr_max_h = 5120; + psr_max_v = 3200; + } else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { psr_max_h = 4096; psr_max_v = 2304; } else if (IS_GEN(dev_priv, 9)) { @@ -606,10 +683,9 @@ void intel_psr_compute_config(struct intel_dp *intel_dp, /* * HSW spec explicitly says PSR is tied to port A. - * BDW+ platforms with DDI implementation of PSR have different - * PSR registers per transcoder and we only implement transcoder EDP - * ones. Since by Display design transcoder EDP is tied to port A - * we can safely escape based on the port A. + * BDW+ platforms have a instance of PSR registers per transcoder but + * for now it only supports one instance of PSR, so lets keep it + * hardcoded to PORT_A */ if (dig_port->base.port != PORT_A) { DRM_DEBUG_KMS("PSR condition failed: Port not supported\n"); @@ -648,9 +724,10 @@ static void intel_psr_activate(struct intel_dp *intel_dp) { struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - if (INTEL_GEN(dev_priv) >= 9) - WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); - WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); + if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) + WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE); + + WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE); WARN_ON(dev_priv->psr.active); lockdep_assert_held(&dev_priv->psr.lock); @@ -663,25 +740,6 @@ static void intel_psr_activate(struct intel_dp *intel_dp) dev_priv->psr.active = true; } -static i915_reg_t gen9_chicken_trans_reg(struct drm_i915_private *dev_priv, - enum transcoder cpu_transcoder) -{ - static const i915_reg_t regs[] = { - [TRANSCODER_A] = CHICKEN_TRANS_A, - [TRANSCODER_B] = CHICKEN_TRANS_B, - [TRANSCODER_C] = CHICKEN_TRANS_C, - [TRANSCODER_EDP] = CHICKEN_TRANS_EDP, - }; - - WARN_ON(INTEL_GEN(dev_priv) < 9); - - if (WARN_ON(cpu_transcoder >= ARRAY_SIZE(regs) || - !regs[cpu_transcoder].reg)) - cpu_transcoder = TRANSCODER_A; - - return regs[cpu_transcoder]; -} - static void intel_psr_enable_source(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { @@ -697,8 +755,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))) { - i915_reg_t reg = gen9_chicken_trans_reg(dev_priv, - cpu_transcoder); + i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder); u32 chicken = I915_READ(reg); chicken |= PSR2_VSC_ENABLE_PROG_HEADER | @@ -720,19 +777,46 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp, if (INTEL_GEN(dev_priv) < 11) mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE; - I915_WRITE(EDP_PSR_DEBUG, mask); + I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask); + + psr_irq_control(dev_priv); } static void intel_psr_enable_locked(struct drm_i915_private *dev_priv, const struct intel_crtc_state *crtc_state) { struct intel_dp *intel_dp = dev_priv->psr.dp; + u32 val; WARN_ON(dev_priv->psr.enabled); dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state); dev_priv->psr.busy_frontbuffer_bits = 0; dev_priv->psr.pipe = to_intel_crtc(crtc_state->base.crtc)->pipe; + dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline; + dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state); + 
dev_priv->psr.transcoder = crtc_state->cpu_transcoder; + + /* + * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR + * will still keep the error set even after the reset done in the + * irq_preinstall and irq_uninstall hooks. + * And enabling in this situation cause the screen to freeze in the + * first time that PSR HW tries to activate so lets keep PSR disabled + * to avoid any rendering problems. + */ + if (INTEL_GEN(dev_priv) >= 12) { + val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder)); + val &= EDP_PSR_ERROR(0); + } else { + val = I915_READ(EDP_PSR_IIR); + val &= EDP_PSR_ERROR(dev_priv->psr.transcoder); + } + if (val) { + dev_priv->psr.sink_not_reliable = true; + DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n"); + return; + } DRM_DEBUG_KMS("Enabling PSR%s\n", dev_priv->psr.psr2_enabled ? "2" : "1"); @@ -782,20 +866,28 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv) u32 val; if (!dev_priv->psr.active) { - if (INTEL_GEN(dev_priv) >= 9) - WARN_ON(I915_READ(EDP_PSR2_CTL) & EDP_PSR2_ENABLE); - WARN_ON(I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE); + if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) { + val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); + WARN_ON(val & EDP_PSR2_ENABLE); + } + + val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); + WARN_ON(val & EDP_PSR_ENABLE); + return; } if (dev_priv->psr.psr2_enabled) { - val = I915_READ(EDP_PSR2_CTL); + tgl_disallow_dc3co_on_psr2_exit(dev_priv); + val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); WARN_ON(!(val & EDP_PSR2_ENABLE)); - I915_WRITE(EDP_PSR2_CTL, val & ~EDP_PSR2_ENABLE); + val &= ~EDP_PSR2_ENABLE; + I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val); } else { - val = I915_READ(EDP_PSR_CTL); + val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); WARN_ON(!(val & EDP_PSR_ENABLE)); - I915_WRITE(EDP_PSR_CTL, val & ~EDP_PSR_ENABLE); + val &= ~EDP_PSR_ENABLE; + I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val); } dev_priv->psr.active = false; } @@ -817,10 +909,10 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) intel_psr_exit(dev_priv); if (dev_priv->psr.psr2_enabled) { - psr_status = EDP_PSR2_STATUS; + psr_status = EDP_PSR2_STATUS(dev_priv->psr.transcoder); psr_status_mask = EDP_PSR2_STATUS_STATE_MASK; } else { - psr_status = EDP_PSR_STATUS; + psr_status = EDP_PSR_STATUS(dev_priv->psr.transcoder); psr_status_mask = EDP_PSR_STATUS_STATE_MASK; } @@ -859,6 +951,7 @@ void intel_psr_disable(struct intel_dp *intel_dp, mutex_unlock(&dev_priv->psr.lock); cancel_work_sync(&dev_priv->psr.work); + cancel_delayed_work_sync(&dev_priv->psr.idle_work); } static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv) @@ -963,7 +1056,8 @@ int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, * defensive enough to cover everything. 
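A recurring pattern in the PSR changes above and below: before gen12 there is a single EDP_PSR_* register block whose bits are grouped per transcoder, so the code keeps a per-transcoder shift; from gen12 on every transcoder gets its own register instance and the shift collapses to 0. A compact sketch of that selection, with FAKE_* placeholder offsets standing in for the real i915_reg_t definitions:

#include <stdio.h>

enum transcoder { TRANSCODER_A, TRANSCODER_B, TRANSCODER_C, TRANSCODER_EDP };

/* Placeholder offsets only; the real driver uses i915_reg_t macros. */
#define FAKE_EDP_PSR_IMR	0x1000
#define FAKE_TRANS_PSR_IMR(t)	(0x2000 + (t) * 0x100)

static void pick_psr_imr(int gen, enum transcoder cpu_transcoder,
			 unsigned int *reg, int *trans_shift)
{
	if (gen >= 12) {
		/* one register instance per transcoder, bits at shift 0 */
		*trans_shift = 0;
		*reg = FAKE_TRANS_PSR_IMR(cpu_transcoder);
	} else {
		/* one shared register, per-transcoder bit groups */
		*trans_shift = cpu_transcoder;
		*reg = FAKE_EDP_PSR_IMR;
	}
}

int main(void)
{
	unsigned int reg;
	int shift;

	pick_psr_imr(12, TRANSCODER_A, &reg, &shift);
	printf("gen12: reg=0x%x shift=%d\n", reg, shift);

	pick_psr_imr(9, TRANSCODER_EDP, &reg, &shift);
	printf("gen9:  reg=0x%x shift=%d\n", reg, shift);
	return 0;
}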
*/ - return __intel_wait_for_register(&dev_priv->uncore, EDP_PSR_STATUS, + return __intel_wait_for_register(&dev_priv->uncore, + EDP_PSR_STATUS(dev_priv->psr.transcoder), EDP_PSR_STATUS_STATE_MASK, EDP_PSR_STATUS_STATE_IDLE, 2, 50, out_value); @@ -979,10 +1073,10 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv) return false; if (dev_priv->psr.psr2_enabled) { - reg = EDP_PSR2_STATUS; + reg = EDP_PSR2_STATUS(dev_priv->psr.transcoder); mask = EDP_PSR2_STATUS_STATE_MASK; } else { - reg = EDP_PSR_STATUS; + reg = EDP_PSR_STATUS(dev_priv->psr.transcoder); mask = EDP_PSR_STATUS_STATE_MASK; } @@ -1067,7 +1161,13 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val) old_mode = dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK; dev_priv->psr.debug = val; - intel_psr_irq_control(dev_priv, dev_priv->psr.debug); + + /* + * Do it right away if it's already enabled, otherwise it will be done + * when enabling the source. + */ + if (dev_priv->psr.enabled) + psr_irq_control(dev_priv); mutex_unlock(&dev_priv->psr.lock); @@ -1159,6 +1259,44 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv, mutex_unlock(&dev_priv->psr.lock); } +/* + * When we will be completely rely on PSR2 S/W tracking in future, + * intel_psr_flush() will invalidate and flush the PSR for ORIGIN_FLIP + * event also therefore tgl_dc3co_flush() require to be changed + * accrodingly in future. + */ +static void +tgl_dc3co_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, enum fb_op_origin origin) +{ + u32 delay; + + mutex_lock(&dev_priv->psr.lock); + + if (!dev_priv->psr.dc3co_enabled) + goto unlock; + + if (!dev_priv->psr.psr2_enabled || !dev_priv->psr.active) + goto unlock; + + /* + * At every frontbuffer flush flip event modified delay of delayed work, + * when delayed work schedules that means display has been idle. + */ + if (!(frontbuffer_bits & + INTEL_FRONTBUFFER_ALL_MASK(dev_priv->psr.pipe))) + goto unlock; + + tgl_psr2_enable_dc3co(dev_priv); + /* DC5/DC6 required idle frames = 6 */ + delay = 6 * dev_priv->psr.dc3co_exit_delay; + mod_delayed_work(system_wq, &dev_priv->psr.idle_work, + usecs_to_jiffies(delay)); + +unlock: + mutex_unlock(&dev_priv->psr.lock); +} + /** * intel_psr_flush - Flush PSR * @dev_priv: i915 device @@ -1178,8 +1316,10 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, if (!CAN_PSR(dev_priv)) return; - if (origin == ORIGIN_FLIP) + if (origin == ORIGIN_FLIP) { + tgl_dc3co_flush(dev_priv, frontbuffer_bits, origin); return; + } mutex_lock(&dev_priv->psr.lock); if (!dev_priv->psr.enabled) { @@ -1208,45 +1348,34 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, */ void intel_psr_init(struct drm_i915_private *dev_priv) { - u32 val; - if (!HAS_PSR(dev_priv)) return; - dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ? - HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE; - if (!dev_priv->psr.sink_support) return; + if (IS_HASWELL(dev_priv)) + /* + * HSW don't have PSR registers on the same space as transcoder + * so set this to a value that when subtract to the register + * in transcoder space results in the right offset for HSW + */ + dev_priv->hsw_psr_mmio_adjust = _SRD_CTL_EDP - _HSW_EDP_PSR_BASE; + if (i915_modparams.enable_psr == -1) if (INTEL_GEN(dev_priv) < 9 || !dev_priv->vbt.psr.enable) i915_modparams.enable_psr = 0; - /* - * If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR - * will still keep the error set even after the reset done in the - * irq_preinstall and irq_uninstall hooks. 
- * And enabling in this situation cause the screen to freeze in the - * first time that PSR HW tries to activate so lets keep PSR disabled - * to avoid any rendering problems. - */ - val = I915_READ(EDP_PSR_IIR); - val &= EDP_PSR_ERROR(edp_psr_shift(TRANSCODER_EDP)); - if (val) { - DRM_DEBUG_KMS("PSR interruption error set\n"); - dev_priv->psr.sink_not_reliable = true; - } - /* Set link_standby x link_off defaults */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) /* HSW and BDW require workarounds that we don't implement. */ dev_priv->psr.link_standby = false; - else - /* For new platforms let's respect VBT back again */ + else if (INTEL_GEN(dev_priv) < 12) + /* For new platforms up to TGL let's respect VBT back again */ dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link; INIT_WORK(&dev_priv->psr.work, intel_psr_work); + INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread); mutex_init(&dev_priv->psr.lock); } @@ -1288,7 +1417,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp) if (val & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR) DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n"); if (val & DP_PSR_LINK_CRC_ERROR) - DRM_ERROR("PSR Link CRC error, disabling PSR\n"); + DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n"); if (val & ~errors) DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n", diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h index dc818826f36d..46e4de8b8cd5 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.h +++ b/drivers/gpu/drm/i915/display/intel_psr.h @@ -30,7 +30,6 @@ void intel_psr_flush(struct drm_i915_private *dev_priv, void intel_psr_init(struct drm_i915_private *dev_priv); void intel_psr_compute_config(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state); -void intel_psr_irq_control(struct drm_i915_private *dev_priv, u32 debug); void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir); void intel_psr_short_pulse(struct intel_dp *intel_dp); int intel_psr_wait_for_idle(const struct intel_crtc_state *new_crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index adeb1c840976..5b7f4baf7348 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2921,7 +2921,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) bytes[0], bytes[1]); return false; } - intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); + intel_sdvo->base.pipe_mask = ~0; return true; } diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h index c9e05bcdd141..a66f224aa17d 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.h +++ b/drivers/gpu/drm/i915/display/intel_sdvo.h @@ -14,6 +14,7 @@ struct drm_i915_private; enum pipe; +enum port; bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv, i915_reg_t sdvo_reg, enum pipe *pipe); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index cae25e493128..72fda0430062 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -48,19 +48,6 @@ #include "intel_psr.h" #include "intel_sprite.h" -bool is_planar_yuv_format(u32 pixelformat) -{ - switch (pixelformat) { - case DRM_FORMAT_NV12: - case DRM_FORMAT_P010: - case DRM_FORMAT_P012: - case DRM_FORMAT_P016: - return true; - default: - return false; - } -} - int intel_usecs_to_scanlines(const struct drm_display_mode 
*adjusted_mode, int usecs) { @@ -300,10 +287,8 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) src_y = src->y1 >> 16; src_h = drm_rect_height(src) >> 16; - src->x1 = src_x << 16; - src->x2 = (src_x + src_w) << 16; - src->y1 = src_y << 16; - src->y2 = (src_y + src_h) << 16; + drm_rect_init(src, src_x << 16, src_y << 16, + src_w << 16, src_h << 16); if (!fb->format->is_yuv) return 0; @@ -337,6 +322,55 @@ bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id) icl_hdr_plane_mask() & BIT(plane_id); } +static void +skl_plane_ratio(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + + if (fb->format->cpp[0] == 8) { + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) { + *num = 10; + *den = 8; + } else { + *num = 9; + *den = 8; + } + } else { + *num = 1; + *den = 1; + } +} + +static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev); + unsigned int pixel_rate = crtc_state->pixel_rate; + unsigned int src_w, src_h, dst_w, dst_h; + unsigned int num, den; + + skl_plane_ratio(crtc_state, plane_state, &num, &den); + + /* two pixels per clock on glk+ */ + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) + den *= 2; + + src_w = drm_rect_width(&plane_state->base.src) >> 16; + src_h = drm_rect_height(&plane_state->base.src) >> 16; + dst_w = drm_rect_width(&plane_state->base.dst); + dst_h = drm_rect_height(&plane_state->base.dst); + + /* Downscaling limits the maximum pixel rate */ + dst_w = min(src_w, dst_w); + dst_h = min(src_h, dst_h); + + return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h), + mul_u32_u32(den, dst_w * dst_h)); +} + static unsigned int skl_plane_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, @@ -361,6 +395,7 @@ skl_program_scaler(struct intel_plane *plane, const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); + const struct drm_framebuffer *fb = plane_state->base.fb; enum pipe pipe = plane->pipe; int scaler_id = plane_state->scaler_id; const struct intel_scaler *scaler = @@ -381,7 +416,7 @@ skl_program_scaler(struct intel_plane *plane, 0, INT_MAX); /* TODO: handle sub-pixel coordinates */ - if (is_planar_yuv_format(plane_state->base.fb->format->format) && + if (drm_format_info_is_yuv_semiplanar(fb->format) && !icl_is_hdr_plane(dev_priv, plane->id)) { y_hphase = skl_scaler_calc_phase(1, hscale, false); y_vphase = skl_scaler_calc_phase(1, vscale, false); @@ -554,7 +589,7 @@ skl_program_plane(struct intel_plane *plane, u32 y = plane_state->color_plane[color_plane].y; u32 src_w = drm_rect_width(&plane_state->base.src) >> 16; u32 src_h = drm_rect_height(&plane_state->base.src) >> 16; - struct intel_plane *linked = plane_state->linked_plane; + struct intel_plane *linked = plane_state->planar_linked_plane; const struct drm_framebuffer *fb = plane_state->base.fb; u8 alpha = plane_state->base.alpha >> 8; u32 plane_color_ctl = 0; @@ -653,7 +688,7 @@ skl_update_plane(struct intel_plane *plane, { int color_plane = 0; - if (plane_state->linked_plane) { + if (plane_state->planar_linked_plane) { /* Program the UV plane */ color_plane = 1; } @@ -825,6 +860,85 @@ 
vlv_update_clrc(const struct intel_plane_state *plane_state) SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos)); } +static void +vlv_plane_ratio(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int cpp = fb->format->cpp[0]; + + /* + * VLV bspec only considers cases where all three planes are + * enabled, and cases where the primary and one sprite is enabled. + * Let's assume the case with just two sprites enabled also + * maps to the latter case. + */ + if (hweight8(active_planes) == 3) { + switch (cpp) { + case 8: + *num = 11; + *den = 8; + break; + case 4: + *num = 18; + *den = 16; + break; + default: + *num = 1; + *den = 1; + break; + } + } else if (hweight8(active_planes) == 2) { + switch (cpp) { + case 8: + *num = 10; + *den = 8; + break; + case 4: + *num = 17; + *den = 16; + break; + default: + *num = 1; + *den = 1; + break; + } + } else { + switch (cpp) { + case 8: + *num = 10; + *den = 8; + break; + default: + *num = 1; + *den = 1; + break; + } + } +} + +int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int pixel_rate; + unsigned int num, den; + + /* + * Note that crtc_state->pixel_rate accounts for both + * horizontal and vertical panel fitter downscaling factors. + * Pre-HSW bspec tells us to only consider the horizontal + * downscaling factor here. We ignore that and just consider + * both for simplicity. + */ + pixel_rate = crtc_state->pixel_rate; + + vlv_plane_ratio(crtc_state, plane_state, &num, &den); + + return DIV_ROUND_UP(pixel_rate * num, den); +} + static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) { u32 sprctl = 0; @@ -1031,6 +1145,164 @@ vlv_plane_get_hw_state(struct intel_plane *plane, return ret; } +static void ivb_plane_ratio(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int cpp = fb->format->cpp[0]; + + if (hweight8(active_planes) == 2) { + switch (cpp) { + case 8: + *num = 10; + *den = 8; + break; + case 4: + *num = 17; + *den = 16; + break; + default: + *num = 1; + *den = 1; + break; + } + } else { + switch (cpp) { + case 8: + *num = 9; + *den = 8; + break; + default: + *num = 1; + *den = 1; + break; + } + } +} + +static void ivb_plane_ratio_scaling(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int cpp = fb->format->cpp[0]; + + switch (cpp) { + case 8: + *num = 12; + *den = 8; + break; + case 4: + *num = 19; + *den = 16; + break; + case 2: + *num = 33; + *den = 32; + break; + default: + *num = 1; + *den = 1; + break; + } +} + +int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int pixel_rate; + unsigned int num, den; + + /* + * Note that crtc_state->pixel_rate accounts for both + * horizontal and vertical panel fitter downscaling factors. + * Pre-HSW bspec tells us to only consider the horizontal + * downscaling factor here. We ignore that and just consider + * both for simplicity. 
+ */ + pixel_rate = crtc_state->pixel_rate; + + ivb_plane_ratio(crtc_state, plane_state, &num, &den); + + return DIV_ROUND_UP(pixel_rate * num, den); +} + +static int ivb_sprite_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int src_w, dst_w, pixel_rate; + unsigned int num, den; + + /* + * Note that crtc_state->pixel_rate accounts for both + * horizontal and vertical panel fitter downscaling factors. + * Pre-HSW bspec tells us to only consider the horizontal + * downscaling factor here. We ignore that and just consider + * both for simplicity. + */ + pixel_rate = crtc_state->pixel_rate; + + src_w = drm_rect_width(&plane_state->base.src) >> 16; + dst_w = drm_rect_width(&plane_state->base.dst); + + if (src_w != dst_w) + ivb_plane_ratio_scaling(crtc_state, plane_state, &num, &den); + else + ivb_plane_ratio(crtc_state, plane_state, &num, &den); + + /* Horizontal downscaling limits the maximum pixel rate */ + dst_w = min(src_w, dst_w); + + return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, num * src_w), + den * dst_w); +} + +static void hsw_plane_ratio(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state, + unsigned int *num, unsigned int *den) +{ + u8 active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int cpp = fb->format->cpp[0]; + + if (hweight8(active_planes) == 2) { + switch (cpp) { + case 8: + *num = 10; + *den = 8; + break; + default: + *num = 1; + *den = 1; + break; + } + } else { + switch (cpp) { + case 8: + *num = 9; + *den = 8; + break; + default: + *num = 1; + *den = 1; + break; + } + } +} + +int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + unsigned int pixel_rate = crtc_state->pixel_rate; + unsigned int num, den; + + hsw_plane_ratio(crtc_state, plane_state, &num, &den); + + return DIV_ROUND_UP(pixel_rate * num, den); +} + static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) { u32 sprctl = 0; @@ -1044,6 +1316,16 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) return sprctl; } +static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state) +{ + struct drm_i915_private *dev_priv = + to_i915(plane_state->base.plane->dev); + const struct drm_framebuffer *fb = plane_state->base.fb; + + return fb->format->cpp[0] == 8 && + (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)); +} + static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { @@ -1066,6 +1348,12 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_XRGB8888: sprctl |= SPRITE_FORMAT_RGBX888; break; + case DRM_FORMAT_XBGR16161616F: + sprctl |= SPRITE_FORMAT_RGBX161616 | SPRITE_RGB_ORDER_RGBX; + break; + case DRM_FORMAT_XRGB16161616F: + sprctl |= SPRITE_FORMAT_RGBX161616; + break; case DRM_FORMAT_YUYV: sprctl |= SPRITE_FORMAT_YUV422 | SPRITE_YUV_ORDER_YUYV; break; @@ -1083,7 +1371,8 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, return 0; } - sprctl |= SPRITE_INT_GAMMA_DISABLE; + if (!ivb_need_sprite_gamma(plane_state)) + sprctl |= SPRITE_INT_GAMMA_DISABLE; if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709) sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709; @@ -1105,12 +1394,26 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, return sprctl; } -static void 
ivb_sprite_linear_gamma(u16 gamma[18]) +static void ivb_sprite_linear_gamma(const struct intel_plane_state *plane_state, + u16 gamma[18]) { - int i; + int scale, i; - for (i = 0; i < 17; i++) - gamma[i] = (i << 10) / 16; + /* + * WaFP16GammaEnabling:ivb,hsw + * "Workaround : When using the 64-bit format, the sprite output + * on each color channel has one quarter amplitude. It can be + * brought up to full amplitude by using sprite internal gamma + * correction, pipe gamma correction, or pipe color space + * conversion to multiply the sprite output by four." + */ + scale = 4; + + for (i = 0; i < 16; i++) + gamma[i] = min((scale * i << 10) / 16, (1 << 10) - 1); + + gamma[i] = min((scale * i << 10) / 16, 1 << 10); + i++; gamma[i] = 3 << 10; i++; @@ -1124,7 +1427,10 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state) u16 gamma[18]; int i; - ivb_sprite_linear_gamma(gamma); + if (!ivb_need_sprite_gamma(plane_state)) + return; + + ivb_sprite_linear_gamma(plane_state, gamma); /* FIXME these register are single buffered :( */ for (i = 0; i < 16; i++) @@ -1257,6 +1563,53 @@ ivb_plane_get_hw_state(struct intel_plane *plane, return ret; } +static int g4x_sprite_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) +{ + const struct drm_framebuffer *fb = plane_state->base.fb; + unsigned int hscale, pixel_rate; + unsigned int limit, decimate; + + /* + * Note that crtc_state->pixel_rate accounts for both + * horizontal and vertical panel fitter downscaling factors. + * Pre-HSW bspec tells us to only consider the horizontal + * downscaling factor here. We ignore that and just consider + * both for simplicity. + */ + pixel_rate = crtc_state->pixel_rate; + + /* Horizontal downscaling limits the maximum pixel rate */ + hscale = drm_rect_calc_hscale(&plane_state->base.src, + &plane_state->base.dst, + 0, INT_MAX); + if (hscale < 0x10000) + return pixel_rate; + + /* Decimation steps at 2x,4x,8x,16x */ + decimate = ilog2(hscale >> 16); + hscale >>= decimate; + + /* Starting limit is 90% of cdclk */ + limit = 9; + + /* -10% per decimation step */ + limit -= decimate; + + /* -10% for RGB */ + if (fb->format->cpp[0] >= 4) + limit--; /* -10% for RGB */ + + /* + * We should also do -10% if sprite scaling is enabled + * on the other pipe, but we can't really check for that, + * so we ignore it. 
+ */ + + return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, 10 * hscale), + limit << 16); +} + static unsigned int g4x_sprite_max_stride(struct intel_plane *plane, u32 pixel_format, u64 modifier, @@ -1300,6 +1653,12 @@ static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, case DRM_FORMAT_XRGB8888: dvscntr |= DVS_FORMAT_RGBX888; break; + case DRM_FORMAT_XBGR16161616F: + dvscntr |= DVS_FORMAT_RGBX161616 | DVS_RGB_ORDER_XBGR; + break; + case DRM_FORMAT_XRGB16161616F: + dvscntr |= DVS_FORMAT_RGBX161616; + break; case DRM_FORMAT_YUYV: dvscntr |= DVS_FORMAT_YUV422 | DVS_YUV_ORDER_YUYV; break; @@ -1513,6 +1872,11 @@ static bool intel_fb_scalable(const struct drm_framebuffer *fb) switch (fb->format->format) { case DRM_FORMAT_C8: return false; + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + return INTEL_GEN(to_i915(fb->dev)) >= 11; default: return true; } @@ -1791,7 +2155,7 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s int src_w = drm_rect_width(&plane_state->base.src) >> 16; /* Display WA #1106 */ - if (is_planar_yuv_format(fb->format->format) && src_w & 3 && + if (drm_format_info_is_yuv_semiplanar(fb->format) && src_w & 3 && (rotation == DRM_MODE_ROTATE_270 || rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n"); @@ -1801,6 +2165,22 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s return 0; } +static int skl_plane_max_scale(struct drm_i915_private *dev_priv, + const struct drm_framebuffer *fb) +{ + /* + * We don't yet know the final source width nor + * whether we can use the HQ scaler mode. Assume + * the best case. + * FIXME need to properly check this later. 
+ */ + if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv) || + !drm_format_info_is_yuv_semiplanar(fb->format)) + return 0x30000 - 1; + else + return 0x20000 - 1; +} + static int skl_plane_check(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { @@ -1818,7 +2198,7 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, /* use scaler when colorkey is not required */ if (!plane_state->ckey.flags && intel_fb_scalable(fb)) { min_scale = 1; - max_scale = skl_max_scale(crtc_state, fb->format->format); + max_scale = skl_plane_max_scale(dev_priv, fb); } ret = drm_atomic_helper_check_plane_state(&plane_state->base, @@ -1993,8 +2373,10 @@ static const u64 i9xx_plane_format_modifiers[] = { }; static const u32 snb_plane_formats[] = { - DRM_FORMAT_XBGR8888, DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, @@ -2024,6 +2406,8 @@ static const u32 skl_plane_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, @@ -2039,6 +2423,8 @@ static const u32 skl_planar_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, @@ -2055,6 +2441,8 @@ static const u32 glk_planar_formats[] = { DRM_FORMAT_ABGR8888, DRM_FORMAT_XRGB2101010, DRM_FORMAT_XBGR2101010, + DRM_FORMAT_XRGB16161616F, + DRM_FORMAT_XBGR16161616F, DRM_FORMAT_YUYV, DRM_FORMAT_YVYU, DRM_FORMAT_UYVY, @@ -2158,6 +2546,13 @@ static const u64 skl_plane_format_modifiers_ccs[] = { DRM_FORMAT_MOD_INVALID }; +static const u64 gen12_plane_format_modifiers_noccs[] = { + I915_FORMAT_MOD_Y_TILED, + I915_FORMAT_MOD_X_TILED, + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID +}; + static bool g4x_sprite_format_mod_supported(struct drm_plane *_plane, u32 format, u64 modifier) { @@ -2198,6 +2593,8 @@ static bool snb_sprite_format_mod_supported(struct drm_plane *_plane, switch (format) { case DRM_FORMAT_XRGB8888: case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_XBGR16161616F: case DRM_FORMAT_YUYV: case DRM_FORMAT_YVYU: case DRM_FORMAT_UYVY: @@ -2306,6 +2703,55 @@ static bool skl_plane_format_mod_supported(struct drm_plane *_plane, } } +static bool gen12_plane_format_mod_supported(struct drm_plane *_plane, + u32 format, u64 modifier) +{ + switch (modifier) { + case DRM_FORMAT_MOD_LINEAR: + case I915_FORMAT_MOD_X_TILED: + case I915_FORMAT_MOD_Y_TILED: + break; + default: + return false; + } + + switch (format) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ARGB8888: + case DRM_FORMAT_ABGR8888: + case DRM_FORMAT_RGB565: + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_YUYV: + case DRM_FORMAT_YVYU: + case DRM_FORMAT_UYVY: + case DRM_FORMAT_VYUY: + case DRM_FORMAT_NV12: + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + case DRM_FORMAT_XVYU2101010: + case DRM_FORMAT_C8: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_Y210: + case DRM_FORMAT_Y212: + case DRM_FORMAT_Y216: + case DRM_FORMAT_XVYU12_16161616: + case DRM_FORMAT_XVYU16161616: + if (modifier == DRM_FORMAT_MOD_LINEAR || + modifier == I915_FORMAT_MOD_X_TILED || + modifier == I915_FORMAT_MOD_Y_TILED) + 
return true; + /* fall through */ + default: + return false; + } +} + static const struct drm_plane_funcs g4x_sprite_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, @@ -2342,6 +2788,15 @@ static const struct drm_plane_funcs skl_plane_funcs = { .format_mod_supported = skl_plane_format_mod_supported, }; +static const struct drm_plane_funcs gen12_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = intel_plane_destroy, + .atomic_duplicate_state = intel_plane_duplicate_state, + .atomic_destroy_state = intel_plane_destroy_state, + .format_mod_supported = gen12_plane_format_mod_supported, +}; + static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { @@ -2430,6 +2885,7 @@ struct intel_plane * skl_universal_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { + const struct drm_plane_funcs *plane_funcs; struct intel_plane *plane; enum drm_plane_type plane_type; unsigned int supported_rotations; @@ -2459,6 +2915,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, plane->disable_plane = skl_disable_plane; plane->get_hw_state = skl_plane_get_hw_state; plane->check_plane = skl_plane_check; + plane->min_cdclk = skl_plane_min_cdclk; if (icl_is_nv12_y_plane(plane_id)) plane->update_slave = icl_update_slave; @@ -2472,11 +2929,19 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, formats = skl_get_plane_formats(dev_priv, pipe, plane_id, &num_formats); - plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); - if (plane->has_ccs) - modifiers = skl_plane_format_modifiers_ccs; - else - modifiers = skl_plane_format_modifiers_noccs; + if (INTEL_GEN(dev_priv) >= 12) { + /* TODO: Implement support for gen-12 CCS modifiers */ + plane->has_ccs = false; + modifiers = gen12_plane_format_modifiers_noccs; + plane_funcs = &gen12_plane_funcs; + } else { + plane->has_ccs = skl_plane_has_ccs(dev_priv, pipe, plane_id); + if (plane->has_ccs) + modifiers = skl_plane_format_modifiers_ccs; + else + modifiers = skl_plane_format_modifiers_noccs; + plane_funcs = &skl_plane_funcs; + } if (plane_id == PLANE_PRIMARY) plane_type = DRM_PLANE_TYPE_PRIMARY; @@ -2486,7 +2951,7 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, possible_crtcs = BIT(pipe); ret = drm_universal_plane_init(&dev_priv->drm, &plane->base, - possible_crtcs, &skl_plane_funcs, + possible_crtcs, plane_funcs, formats, num_formats, modifiers, plane_type, "plane %d%c", plane_id + 1, @@ -2519,6 +2984,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv, BIT(DRM_MODE_BLEND_PREMULTI) | BIT(DRM_MODE_BLEND_COVERAGE)); + drm_plane_create_zpos_immutable_property(&plane->base, plane_id); + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); return plane; @@ -2540,7 +3007,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, const u64 *modifiers; const u32 *formats; int num_formats; - int ret; + int ret, zpos; if (INTEL_GEN(dev_priv) >= 9) return skl_universal_plane_create(dev_priv, pipe, @@ -2556,6 +3023,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->disable_plane = vlv_disable_plane; plane->get_hw_state = vlv_plane_get_hw_state; plane->check_plane = vlv_sprite_check; + plane->min_cdclk = vlv_plane_min_cdclk; formats = vlv_plane_formats; num_formats = ARRAY_SIZE(vlv_plane_formats); @@ -2569,6 +3037,11 @@ intel_sprite_plane_create(struct 
drm_i915_private *dev_priv, plane->get_hw_state = ivb_plane_get_hw_state; plane->check_plane = g4x_sprite_check; + if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) + plane->min_cdclk = hsw_plane_min_cdclk; + else + plane->min_cdclk = ivb_sprite_min_cdclk; + formats = snb_plane_formats; num_formats = ARRAY_SIZE(snb_plane_formats); modifiers = i9xx_plane_format_modifiers; @@ -2580,6 +3053,7 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, plane->disable_plane = g4x_disable_plane; plane->get_hw_state = g4x_plane_get_hw_state; plane->check_plane = g4x_sprite_check; + plane->min_cdclk = g4x_sprite_min_cdclk; modifiers = i9xx_plane_format_modifiers; if (IS_GEN(dev_priv, 6)) { @@ -2630,6 +3104,9 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv, DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); + zpos = sprite + 1; + drm_plane_create_zpos_immutable_property(&plane->base, zpos); + drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs); return plane; diff --git a/drivers/gpu/drm/i915/display/intel_sprite.h b/drivers/gpu/drm/i915/display/intel_sprite.h index 093a2d156f1e..5eeaa92420d1 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.h +++ b/drivers/gpu/drm/i915/display/intel_sprite.h @@ -17,7 +17,6 @@ struct drm_i915_private; struct intel_crtc_state; struct intel_plane_state; -bool is_planar_yuv_format(u32 pixelformat); int intel_usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, int usecs); struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, @@ -50,4 +49,11 @@ static inline u8 icl_hdr_plane_mask(void) bool icl_is_hdr_plane(struct drm_i915_private *dev_priv, enum plane_id plane_id); +int ivb_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +int hsw_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); +int vlv_plane_min_cdclk(const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state); + #endif /* __INTEL_SPRITE_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 85743a43bee2..7773169b7331 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -23,32 +23,38 @@ static const char *tc_port_mode_name(enum tc_port_mode mode) return names[mode]; } -static bool has_modular_fia(struct drm_i915_private *i915) -{ - if (!INTEL_INFO(i915)->display.has_modular_fia) - return false; - - return intel_uncore_read(&i915->uncore, - PORT_TX_DFLEXDPSP(FIA1)) & MODULAR_FIA_MASK; -} - -static enum phy_fia tc_port_to_fia(struct drm_i915_private *i915, - enum tc_port tc_port) +static void +tc_port_load_fia_params(struct drm_i915_private *i915, + struct intel_digital_port *dig_port) { - if (!has_modular_fia(i915)) - return FIA1; + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(i915, port); + u32 modular_fia; + + if (INTEL_INFO(i915)->display.has_modular_fia) { + modular_fia = intel_uncore_read(&i915->uncore, + PORT_TX_DFLEXDPSP(FIA1)); + modular_fia &= MODULAR_FIA_MASK; + } else { + modular_fia = 0; + } /* * Each Modular FIA instance houses 2 TC ports. In SOC that has more * than two TC ports, there are multiple instances of Modular FIA. 
*/ - return tc_port / 2; + if (modular_fia) { + dig_port->tc_phy_fia = tc_port / 2; + dig_port->tc_phy_fia_idx = tc_port % 2; + } else { + dig_port->tc_phy_fia = FIA1; + dig_port->tc_phy_fia_idx = tc_port; + } } u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); struct intel_uncore *uncore = &i915->uncore; u32 lane_mask; @@ -57,8 +63,23 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) WARN_ON(lane_mask == 0xffffffff); - return (lane_mask & DP_LANE_ASSIGNMENT_MASK(tc_port)) >> - DP_LANE_ASSIGNMENT_SHIFT(tc_port); + lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx); + return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); +} + +u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) +{ + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + struct intel_uncore *uncore = &i915->uncore; + u32 pin_mask; + + pin_mask = intel_uncore_read(uncore, + PORT_TX_DFLEXPA1(dig_port->tc_phy_fia)); + + WARN_ON(pin_mask == 0xffffffff); + + return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >> + DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx); } int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port) @@ -95,7 +116,6 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, int required_lanes) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); bool lane_reversal = dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; struct intel_uncore *uncore = &i915->uncore; u32 val; @@ -104,19 +124,21 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, val = intel_uncore_read(uncore, PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia)); - val &= ~DFLEXDPMLE1_DPMLETC_MASK(tc_port); + val &= ~DFLEXDPMLE1_DPMLETC_MASK(dig_port->tc_phy_fia_idx); switch (required_lanes) { case 1: - val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3(tc_port) : - DFLEXDPMLE1_DPMLETC_ML0(tc_port); + val |= lane_reversal ? + DFLEXDPMLE1_DPMLETC_ML3(dig_port->tc_phy_fia_idx) : + DFLEXDPMLE1_DPMLETC_ML0(dig_port->tc_phy_fia_idx); break; case 2: - val |= lane_reversal ? DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) : - DFLEXDPMLE1_DPMLETC_ML1_0(tc_port); + val |= lane_reversal ? 
+ DFLEXDPMLE1_DPMLETC_ML3_2(dig_port->tc_phy_fia_idx) : + DFLEXDPMLE1_DPMLETC_ML1_0(dig_port->tc_phy_fia_idx); break; case 4: - val |= DFLEXDPMLE1_DPMLETC_ML3_0(tc_port); + val |= DFLEXDPMLE1_DPMLETC_ML3_0(dig_port->tc_phy_fia_idx); break; default: MISSING_CASE(required_lanes); @@ -164,9 +186,9 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) return mask; } - if (val & TC_LIVE_STATE_TBT(tc_port)) + if (val & TC_LIVE_STATE_TBT(dig_port->tc_phy_fia_idx)) mask |= BIT(TC_PORT_TBT_ALT); - if (val & TC_LIVE_STATE_TC(tc_port)) + if (val & TC_LIVE_STATE_TC(dig_port->tc_phy_fia_idx)) mask |= BIT(TC_PORT_DP_ALT); if (intel_uncore_read(uncore, SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)) @@ -182,7 +204,6 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port) static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); struct intel_uncore *uncore = &i915->uncore; u32 val; @@ -194,14 +215,13 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) return false; } - return val & DP_PHY_MODE_STATUS_COMPLETED(tc_port); + return val & DP_PHY_MODE_STATUS_COMPLETED(dig_port->tc_phy_fia_idx); } static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, bool enable) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); struct intel_uncore *uncore = &i915->uncore; u32 val; @@ -215,9 +235,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, return false; } - val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); if (!enable) - val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port); + val |= DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx); intel_uncore_write(uncore, PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val); @@ -232,7 +252,6 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port, static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); - enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); struct intel_uncore *uncore = &i915->uncore; u32 val; @@ -244,7 +263,7 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port) return true; } - return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port)); + return !(val & DP_PHY_MODE_STATUS_NOT_SAFE(dig_port->tc_phy_fia_idx)); } /* @@ -540,5 +559,5 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) mutex_init(&dig_port->tc_lock); dig_port->tc_legacy_port = is_legacy; dig_port->tc_link_refcount = 0; - dig_port->tc_phy_fia = tc_port_to_fia(i915, tc_port); + tc_port_load_fia_params(i915, dig_port); } diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 783d75531435..463f1b3c836f 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -13,6 +13,7 @@ struct intel_digital_port; bool intel_tc_port_connected(struct intel_digital_port *dig_port); u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port); +u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port); int intel_tc_port_fia_max_lane_count(struct intel_digital_port *dig_port); void intel_tc_port_set_fia_lane_count(struct intel_digital_port 
*dig_port, int required_lanes); diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c index b70221f5112a..9983fadf6c28 100644 --- a/drivers/gpu/drm/i915/display/intel_tv.c +++ b/drivers/gpu/drm/i915/display/intel_tv.c @@ -961,11 +961,10 @@ intel_tv_mode_valid(struct drm_connector *connector, return MODE_CLOCK_HIGH; /* Ensure TV refresh is close to desired refresh */ - if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) - < 1000) - return MODE_OK; + if (abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000) >= 1000) + return MODE_CLOCK_RANGE; - return MODE_CLOCK_RANGE; + return MODE_OK; } static int @@ -1702,7 +1701,7 @@ intel_tv_detect(struct drm_connector *connector, struct intel_load_detect_pipe tmp; int ret; - ret = intel_get_load_detect_pipe(connector, NULL, &tmp, ctx); + ret = intel_get_load_detect_pipe(connector, &tmp, ctx); if (ret < 0) return ret; @@ -1948,9 +1947,8 @@ intel_tv_init(struct drm_i915_private *dev_priv) intel_encoder->type = INTEL_OUTPUT_TVOUT; intel_encoder->power_domain = POWER_DOMAIN_PORT_OTHER; intel_encoder->port = PORT_NONE; - intel_encoder->crtc_mask = (1 << 0) | (1 << 1); + intel_encoder->pipe_mask = ~0; intel_encoder->cloneable = 0; - intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1)); intel_tv->type = DRM_MODE_CONNECTOR_Unknown; /* BIOS margin values */ diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index dfcd156b5094..69a7cb1fa121 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -114,6 +114,7 @@ enum bdb_block_id { BDB_LVDS_POWER = 44, BDB_MIPI_CONFIG = 52, BDB_MIPI_SEQUENCE = 53, + BDB_COMPRESSION_PARAMETERS = 56, BDB_SKIP = 254, /* VBIOS private block, ignore */ }; @@ -291,6 +292,8 @@ struct bdb_general_features { #define DVO_PORT_HDMIE 12 /* 193 */ #define DVO_PORT_DPF 13 /* N/A */ #define DVO_PORT_HDMIF 14 /* N/A */ +#define DVO_PORT_DPG 15 +#define DVO_PORT_HDMIG 16 #define DVO_PORT_MIPIA 21 /* 171 */ #define DVO_PORT_MIPIB 22 /* 171 */ #define DVO_PORT_MIPIC 23 /* 171 */ @@ -325,6 +328,7 @@ enum vbt_gmbus_ddi { #define DP_AUX_D 0x30 #define DP_AUX_E 0x50 #define DP_AUX_F 0x60 +#define DP_AUX_G 0x70 #define VBT_DP_MAX_LINK_RATE_HBR3 0 #define VBT_DP_MAX_LINK_RATE_HBR2 1 @@ -808,4 +812,55 @@ struct bdb_mipi_sequence { u8 data[0]; /* up to 6 variable length blocks */ } __packed; +/* + * Block 56 - Compression Parameters + */ + +#define VBT_RC_BUFFER_BLOCK_SIZE_1KB 0 +#define VBT_RC_BUFFER_BLOCK_SIZE_4KB 1 +#define VBT_RC_BUFFER_BLOCK_SIZE_16KB 2 +#define VBT_RC_BUFFER_BLOCK_SIZE_64KB 3 + +#define VBT_DSC_LINE_BUFFER_DEPTH(vbt_value) ((vbt_value) + 8) /* bits */ +#define VBT_DSC_MAX_BPP(vbt_value) (6 + (vbt_value) * 2) + +struct dsc_compression_parameters_entry { + u8 version_major:4; + u8 version_minor:4; + + u8 rc_buffer_block_size:2; + u8 reserved1:6; + + /* + * Buffer size in bytes: + * + * 4 ^ rc_buffer_block_size * 1024 * (rc_buffer_size + 1) bytes + */ + u8 rc_buffer_size; + u32 slices_per_line; + + u8 line_buffer_depth:4; + u8 reserved2:4; + + /* Flag Bits 1 */ + u8 block_prediction_enable:1; + u8 reserved3:7; + + u8 max_bpp; /* mapping */ + + /* Color depth capabilities */ + u8 reserved4:1; + u8 support_8bpc:1; + u8 support_10bpc:1; + u8 support_12bpc:1; + u8 reserved5:4; + + u16 slice_height; +} __packed; + +struct bdb_compression_parameters { + u16 entry_size; + struct dsc_compression_parameters_entry data[16]; +} __packed; + #endif /* _INTEL_VBT_DEFS_H_ 
*/ diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index d4fb7f16f9f6..896b0c334f5e 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -322,8 +322,8 @@ static int get_column_index_for_rc_params(u8 bits_per_component) int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, struct intel_crtc_state *pipe_config) { - struct drm_dsc_config *vdsc_cfg = &pipe_config->dp_dsc_cfg; - u16 compressed_bpp = pipe_config->dsc_params.compressed_bpp; + struct drm_dsc_config *vdsc_cfg = &pipe_config->dsc.config; + u16 compressed_bpp = pipe_config->dsc.compressed_bpp; u8 i = 0; int row_index = 0; int column_index = 0; @@ -332,7 +332,7 @@ int intel_dp_compute_dsc_params(struct intel_dp *intel_dp, vdsc_cfg->pic_width = pipe_config->base.adjusted_mode.crtc_hdisplay; vdsc_cfg->pic_height = pipe_config->base.adjusted_mode.crtc_vdisplay; vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width, - pipe_config->dsc_params.slice_count); + pipe_config->dsc.slice_count); /* * Slice Height of 8 works for all currently available panels. So start * with that if pic_height is an integral multiple of 8. @@ -485,13 +485,13 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, { struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg; + const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; enum pipe pipe = crtc->pipe; enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; u32 pps_val = 0; u32 rc_buf_thresh_dword[4]; u32 rc_range_params_dword[8]; - u8 num_vdsc_instances = (crtc_state->dsc_params.dsc_split) ? 2 : 1; + u8 num_vdsc_instances = (crtc_state->dsc.dsc_split) ? 
2 : 1; int i = 0; /* Populate PICTURE_PARAMETER_SET_0 registers */ @@ -514,11 +514,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe), pps_val); } @@ -533,11 +533,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe), pps_val); } @@ -553,11 +553,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe), pps_val); } @@ -573,11 +573,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe), pps_val); } @@ -593,11 +593,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe), pps_val); } @@ -613,11 +613,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe), pps_val); } @@ -635,11 +635,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe), 
pps_val); } @@ -655,11 +655,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe), pps_val); } @@ -675,11 +675,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe), pps_val); } @@ -695,11 +695,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe), pps_val); } @@ -717,11 +717,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe), pps_val); } @@ -740,11 +740,11 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, * If 2 VDSC instances are needed, configure PPS for second * VDSC */ - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val); } else { I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val); - if (crtc_state->dsc_params.dsc_split) + if (crtc_state->dsc.dsc_split) I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe), pps_val); } @@ -763,7 +763,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]); I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]); I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]); - if (crtc_state->dsc_params.dsc_split) { + if (crtc_state->dsc.dsc_split) { I915_WRITE(DSCC_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]); I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW, @@ -782,7 +782,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, rc_buf_thresh_dword[2]); I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe), rc_buf_thresh_dword[3]); - if (crtc_state->dsc_params.dsc_split) { + if (crtc_state->dsc.dsc_split) { I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe), rc_buf_thresh_dword[0]); I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe), @@ -824,7 +824,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, rc_range_params_dword[6]); I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW, rc_range_params_dword[7]); 
- if (crtc_state->dsc_params.dsc_split) { + if (crtc_state->dsc.dsc_split) { I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0, rc_range_params_dword[0]); I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW, @@ -859,7 +859,7 @@ static void intel_configure_pps_for_dsc_encoder(struct intel_encoder *encoder, rc_range_params_dword[6]); I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe), rc_range_params_dword[7]); - if (crtc_state->dsc_params.dsc_split) { + if (crtc_state->dsc.dsc_split) { I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe), rc_range_params_dword[0]); I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe), @@ -885,7 +885,7 @@ static void intel_dp_write_dsc_pps_sdp(struct intel_encoder *encoder, { struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp); - const struct drm_dsc_config *vdsc_cfg = &crtc_state->dp_dsc_cfg; + const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config; struct drm_dsc_pps_infoframe dp_dsc_pps_sdp; /* Prepare DP SDP PPS header as per DP 1.4 spec, Table 2-123 */ @@ -909,7 +909,7 @@ void intel_dsc_enable(struct intel_encoder *encoder, u32 dss_ctl1_val = 0; u32 dss_ctl2_val = 0; - if (!crtc_state->dsc_params.compression_enable) + if (!crtc_state->dsc.compression_enable) return; /* Enable Power wells for VDSC/joining */ @@ -928,7 +928,7 @@ void intel_dsc_enable(struct intel_encoder *encoder, dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe); } dss_ctl2_val |= LEFT_BRANCH_VDSC_ENABLE; - if (crtc_state->dsc_params.dsc_split) { + if (crtc_state->dsc.dsc_split) { dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE; dss_ctl1_val |= JOINER_ENABLE; } @@ -944,7 +944,7 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state) i915_reg_t dss_ctl1_reg, dss_ctl2_reg; u32 dss_ctl1_val = 0, dss_ctl2_val = 0; - if (!old_crtc_state->dsc_params.compression_enable) + if (!old_crtc_state->dsc.compression_enable) return; if (old_crtc_state->cpu_transcoder == TRANSCODER_EDP) { diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c new file mode 100644 index 000000000000..2ff7293986d4 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_vga.c @@ -0,0 +1,160 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include <linux/pci.h> +#include <linux/vgaarb.h> + +#include <drm/i915_drm.h> + +#include "i915_drv.h" +#include "intel_vga.h" + +static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915) +{ + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + return VLV_VGACNTRL; + else if (INTEL_GEN(i915) >= 5) + return CPU_VGACNTRL; + else + return VGACNTRL; +} + +/* Disable the VGA plane that we never use */ +void intel_vga_disable(struct drm_i915_private *dev_priv) +{ + struct pci_dev *pdev = dev_priv->drm.pdev; + i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv); + u8 sr1; + + /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */ + vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); + outb(SR01, VGA_SR_INDEX); + sr1 = inb(VGA_SR_DATA); + outb(sr1 | 1 << 5, VGA_SR_DATA); + vga_put(pdev, VGA_RSRC_LEGACY_IO); + udelay(300); + + I915_WRITE(vga_reg, VGA_DISP_DISABLE); + POSTING_READ(vga_reg); +} + +void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv) +{ + i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv); + + if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) { + DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); + intel_vga_disable(dev_priv); + } +} + +void intel_vga_redisable(struct drm_i915_private *i915) +{ + 
intel_wakeref_t wakeref; + + /* + * This function can be called both from intel_modeset_setup_hw_state or + * at a very early point in our resume sequence, where the power well + * structures are not yet restored. Since this function is at a very + * paranoid "someone might have enabled VGA while we were not looking" + * level, just check if the power well is enabled instead of trying to + * follow the "don't touch the power well if we don't need it" policy + * the rest of the driver uses. + */ + wakeref = intel_display_power_get_if_enabled(i915, POWER_DOMAIN_VGA); + if (!wakeref) + return; + + intel_vga_redisable_power_on(i915); + + intel_display_power_put(i915, POWER_DOMAIN_VGA, wakeref); +} + +void intel_vga_reset_io_mem(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = i915->drm.pdev; + + /* + * After we re-enable the power well, if we touch VGA register 0x3d5 + * we'll get unclaimed register interrupts. This stops after we write + * anything to the VGA MSR register. The vgacon module uses this + * register all the time, so if we unbind our driver and, as a + * consequence, bind vgacon, we'll get stuck in an infinite loop at + * console_unlock(). So make here we touch the VGA MSR register, making + * sure vgacon can keep working normally without triggering interrupts + * and error messages. + */ + vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO); + outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); + vga_put(pdev, VGA_RSRC_LEGACY_IO); +} + +static int +intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode) +{ + unsigned int reg = INTEL_GEN(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; + u16 gmch_ctrl; + + if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) { + DRM_ERROR("failed to read control word\n"); + return -EIO; + } + + if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode) + return 0; + + if (enable_decode) + gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; + else + gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; + + if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) { + DRM_ERROR("failed to write control word\n"); + return -EIO; + } + + return 0; +} + +static unsigned int +intel_vga_set_decode(void *cookie, bool enable_decode) +{ + struct drm_i915_private *i915 = cookie; + + intel_vga_set_state(i915, enable_decode); + + if (enable_decode) + return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | + VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; + else + return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; +} + +int intel_vga_register(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = i915->drm.pdev; + int ret; + + /* + * If we have > 1 VGA cards, then we need to arbitrate access to the + * common VGA resources. + * + * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), + * then we do not take part in VGA arbitration and the + * vga_client_register() fails with -ENODEV. 
+ */ + ret = vga_client_register(pdev, i915, NULL, intel_vga_set_decode); + if (ret && ret != -ENODEV) + return ret; + + return 0; +} + +void intel_vga_unregister(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = i915->drm.pdev; + + vga_client_register(pdev, NULL, NULL, NULL); +} diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h new file mode 100644 index 000000000000..ba5b55b917f0 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_vga.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_VGA_H__ +#define __INTEL_VGA_H__ + +struct drm_i915_private; + +void intel_vga_reset_io_mem(struct drm_i915_private *i915); +void intel_vga_disable(struct drm_i915_private *i915); +void intel_vga_redisable(struct drm_i915_private *i915); +void intel_vga_redisable_power_on(struct drm_i915_private *i915); +int intel_vga_register(struct drm_i915_private *i915); +void intel_vga_unregister(struct drm_i915_private *i915); + +#endif /* __INTEL_VGA_H__ */ diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index a71b22bdd95b..0ca49b1604c6 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -749,7 +749,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder, struct drm_crtc *crtc = pipe_config->base.crtc; struct drm_i915_private *dev_priv = to_i915(crtc->dev); struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pipe = intel_crtc->pipe; + enum pipe pipe = intel_crtc->pipe; enum port port; u32 val; bool glk_cold_boot = false; @@ -1870,11 +1870,11 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv) * port C. BXT isn't limited like this. */ if (IS_GEN9_LP(dev_priv)) - intel_encoder->crtc_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C); + intel_encoder->pipe_mask = ~0; else if (port == PORT_A) - intel_encoder->crtc_mask = BIT(PIPE_A); + intel_encoder->pipe_mask = BIT(PIPE_A); else - intel_encoder->crtc_mask = BIT(PIPE_B); + intel_encoder->pipe_mask = BIT(PIPE_B); if (dev_priv->vbt.dsi.config->dual_link) intel_dsi->ports = BIT(PORT_A) | BIT(PORT_C); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c index f99920652751..81366aa4812b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c @@ -155,7 +155,6 @@ static void clear_pages_dma_fence_cb(struct dma_fence *fence, static void clear_pages_worker(struct work_struct *work) { struct clear_pages_work *w = container_of(work, typeof(*w), work); - struct drm_i915_private *i915 = w->ce->engine->i915; struct drm_i915_gem_object *obj = w->sleeve->vma->obj; struct i915_vma *vma = w->sleeve->vma; struct i915_request *rq; @@ -173,11 +172,9 @@ static void clear_pages_worker(struct work_struct *work) obj->read_domains = I915_GEM_GPU_DOMAINS; obj->write_domain = 0; - /* XXX: we need to kill this */ - mutex_lock(&i915->drm.struct_mutex); err = i915_vma_pin(vma, 0, 0, PIN_USER); if (unlikely(err)) - goto out_unlock; + goto out_signal; batch = intel_emit_vma_fill_blt(w->ce, vma, w->value); if (IS_ERR(batch)) { @@ -211,7 +208,7 @@ static void clear_pages_worker(struct work_struct *work) * keep track of the GPU activity within this vma/request, and * propagate the signal from the request to w->dma. 
*/ - err = i915_active_ref(&vma->active, rq->timeline, rq); + err = __i915_vma_move_to_active(vma, rq); if (err) goto out_request; @@ -229,8 +226,6 @@ out_batch: intel_emit_vma_release(w->ce, batch); out_unpin: i915_vma_unpin(vma); -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); out_signal: if (unlikely(err)) { dma_fence_set_error(&w->dma, err); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c index 755c4542629f..e553ca8d98eb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c @@ -69,8 +69,10 @@ #include <drm/i915_drm.h> -#include "gt/intel_lrc_reg.h" +#include "gt/intel_engine_heartbeat.h" #include "gt/intel_engine_user.h" +#include "gt/intel_lrc_reg.h" +#include "gt/intel_ring.h" #include "i915_gem_context.h" #include "i915_globals.h" @@ -167,97 +169,6 @@ lookup_user_engine(struct i915_gem_context *ctx, return i915_gem_context_get_engine(ctx, idx); } -static inline int new_hw_id(struct drm_i915_private *i915, gfp_t gfp) -{ - unsigned int max; - - lockdep_assert_held(&i915->contexts.mutex); - - if (INTEL_GEN(i915) >= 12) - max = GEN12_MAX_CONTEXT_HW_ID; - else if (INTEL_GEN(i915) >= 11) - max = GEN11_MAX_CONTEXT_HW_ID; - else if (USES_GUC_SUBMISSION(i915)) - /* - * When using GuC in proxy submission, GuC consumes the - * highest bit in the context id to indicate proxy submission. - */ - max = MAX_GUC_CONTEXT_HW_ID; - else - max = MAX_CONTEXT_HW_ID; - - return ida_simple_get(&i915->contexts.hw_ida, 0, max, gfp); -} - -static int steal_hw_id(struct drm_i915_private *i915) -{ - struct i915_gem_context *ctx, *cn; - LIST_HEAD(pinned); - int id = -ENOSPC; - - lockdep_assert_held(&i915->contexts.mutex); - - list_for_each_entry_safe(ctx, cn, - &i915->contexts.hw_id_list, hw_id_link) { - if (atomic_read(&ctx->hw_id_pin_count)) { - list_move_tail(&ctx->hw_id_link, &pinned); - continue; - } - - GEM_BUG_ON(!ctx->hw_id); /* perma-pinned kernel context */ - list_del_init(&ctx->hw_id_link); - id = ctx->hw_id; - break; - } - - /* - * Remember how far we got up on the last repossesion scan, so the - * list is kept in a "least recently scanned" order. - */ - list_splice_tail(&pinned, &i915->contexts.hw_id_list); - return id; -} - -static int assign_hw_id(struct drm_i915_private *i915, unsigned int *out) -{ - int ret; - - lockdep_assert_held(&i915->contexts.mutex); - - /* - * We prefer to steal/stall ourselves and our users over that of the - * entire system. That may be a little unfair to our users, and - * even hurt high priority clients. The choice is whether to oomkill - * something else, or steal a context id. 
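The deleted new_hw_id()/steal_hw_id()/assign_hw_id() trio implemented an "allocate from a bounded ID space, and on exhaustion repossess an ID from an unpinned context before failing" policy. Reduced to a toy bitmap allocator, this is roughly the shape of that policy; it is purely illustrative and nothing like the IDA-backed original:

#include <stdbool.h>
#include <stdio.h>

#define MAX_IDS 4

static bool id_used[MAX_IDS];
static bool id_pinned[MAX_IDS];

static int alloc_id(void)
{
        for (int i = 0; i < MAX_IDS; i++)
                if (!id_used[i]) {
                        id_used[i] = true;
                        return i;
                }
        return -1;                      /* ID space exhausted */
}

static int steal_id(void)
{
        /* Repossess an ID whose owner is not currently pinned. */
        for (int i = 0; i < MAX_IDS; i++)
                if (id_used[i] && !id_pinned[i])
                        return i;
        return -1;
}

static int assign_id(void)
{
        int id = alloc_id();

        if (id < 0)
                id = steal_id();        /* prefer stealing over failing outright */
        return id;
}

int main(void)
{
        for (int i = 0; i < MAX_IDS; i++)
                id_pinned[alloc_id()] = (i & 1); /* fill the space, pin half */
        printf("assigned id %d\n", assign_id());
        return 0;
}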
- */ - ret = new_hw_id(i915, GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); - if (unlikely(ret < 0)) { - ret = steal_hw_id(i915); - if (ret < 0) /* once again for the correct errno code */ - ret = new_hw_id(i915, GFP_KERNEL); - if (ret < 0) - return ret; - } - - *out = ret; - return 0; -} - -static void release_hw_id(struct i915_gem_context *ctx) -{ - struct drm_i915_private *i915 = ctx->i915; - - if (list_empty(&ctx->hw_id_link)) - return; - - mutex_lock(&i915->contexts.mutex); - if (!list_empty(&ctx->hw_id_link)) { - ida_simple_remove(&i915->contexts.hw_ida, ctx->hw_id); - list_del_init(&ctx->hw_id_link); - } - mutex_unlock(&i915->contexts.mutex); -} - static void __free_engines(struct i915_gem_engines *e, unsigned int count) { while (count--) { @@ -294,27 +205,33 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx) for_each_engine(engine, gt, id) { struct intel_context *ce; + if (engine->legacy_idx == INVALID_ENGINE) + continue; + + GEM_BUG_ON(engine->legacy_idx >= I915_NUM_ENGINES); + GEM_BUG_ON(e->engines[engine->legacy_idx]); + ce = intel_context_create(ctx, engine); if (IS_ERR(ce)) { - __free_engines(e, id); + __free_engines(e, e->num_engines + 1); return ERR_CAST(ce); } - e->engines[id] = ce; - e->num_engines = id + 1; + e->engines[engine->legacy_idx] = ce; + e->num_engines = max(e->num_engines, engine->legacy_idx); } + e->num_engines++; return e; } static void i915_gem_context_free(struct i915_gem_context *ctx) { - lockdep_assert_held(&ctx->i915->drm.struct_mutex); GEM_BUG_ON(!i915_gem_context_is_closed(ctx)); - release_hw_id(ctx); - if (ctx->vm) - i915_vm_put(ctx->vm); + spin_lock(&ctx->i915->gem.contexts.lock); + list_del(&ctx->link); + spin_unlock(&ctx->i915->gem.contexts.lock); free_engines(rcu_access_pointer(ctx->engines)); mutex_destroy(&ctx->engines_mutex); @@ -327,70 +244,202 @@ static void i915_gem_context_free(struct i915_gem_context *ctx) kfree(ctx->name); put_pid(ctx->pid); - list_del(&ctx->link); mutex_destroy(&ctx->mutex); kfree_rcu(ctx, rcu); } -static void contexts_free(struct drm_i915_private *i915) +static void contexts_free_all(struct llist_node *list) { - struct llist_node *freed = llist_del_all(&i915->contexts.free_list); struct i915_gem_context *ctx, *cn; - lockdep_assert_held(&i915->drm.struct_mutex); - - llist_for_each_entry_safe(ctx, cn, freed, free_link) + llist_for_each_entry_safe(ctx, cn, list, free_link) i915_gem_context_free(ctx); } -static void contexts_free_first(struct drm_i915_private *i915) +static void contexts_flush_free(struct i915_gem_contexts *gc) { - struct i915_gem_context *ctx; - struct llist_node *freed; - - lockdep_assert_held(&i915->drm.struct_mutex); - - freed = llist_del_first(&i915->contexts.free_list); - if (!freed) - return; - - ctx = container_of(freed, typeof(*ctx), free_link); - i915_gem_context_free(ctx); + contexts_free_all(llist_del_all(&gc->free_list)); } static void contexts_free_worker(struct work_struct *work) { - struct drm_i915_private *i915 = - container_of(work, typeof(*i915), contexts.free_work); + struct i915_gem_contexts *gc = + container_of(work, typeof(*gc), free_work); - mutex_lock(&i915->drm.struct_mutex); - contexts_free(i915); - mutex_unlock(&i915->drm.struct_mutex); + contexts_flush_free(gc); } void i915_gem_context_release(struct kref *ref) { struct i915_gem_context *ctx = container_of(ref, typeof(*ctx), ref); - struct drm_i915_private *i915 = ctx->i915; + struct i915_gem_contexts *gc = &ctx->i915->gem.contexts; trace_i915_context_free(ctx); - if (llist_add(&ctx->free_link, 
&i915->contexts.free_list)) - queue_work(i915->wq, &i915->contexts.free_work); + if (llist_add(&ctx->free_link, &gc->free_list)) + schedule_work(&gc->free_work); } -static void context_close(struct i915_gem_context *ctx) +static inline struct i915_gem_engines * +__context_engines_static(const struct i915_gem_context *ctx) { - mutex_lock(&ctx->mutex); + return rcu_dereference_protected(ctx->engines, true); +} - i915_gem_context_set_closed(ctx); - ctx->file_priv = ERR_PTR(-EBADF); +static bool __reset_engine(struct intel_engine_cs *engine) +{ + struct intel_gt *gt = engine->gt; + bool success = false; + + if (!intel_has_reset_engine(gt)) + return false; + + if (!test_and_set_bit(I915_RESET_ENGINE + engine->id, + >->reset.flags)) { + success = intel_engine_reset(engine, NULL) == 0; + clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, + >->reset.flags); + } + + return success; +} + +static void __reset_context(struct i915_gem_context *ctx, + struct intel_engine_cs *engine) +{ + intel_gt_handle_error(engine->gt, engine->mask, 0, + "context closure in %s", ctx->name); +} + +static bool __cancel_engine(struct intel_engine_cs *engine) +{ + /* + * Send a "high priority pulse" down the engine to cause the + * current request to be momentarily preempted. (If it fails to + * be preempted, it will be reset). As we have marked our context + * as banned, any incomplete request, including any running, will + * be skipped following the preemption. + * + * If there is no hangchecking (one of the reasons why we try to + * cancel the context) and no forced preemption, there may be no + * means by which we reset the GPU and evict the persistent hog. + * Ergo if we are unable to inject a preemptive pulse that can + * kill the banned context, we fallback to doing a local reset + * instead. + */ + if (IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT) && + !intel_engine_pulse(engine)) + return true; + + /* If we are unable to send a pulse, try resetting this engine. */ + return __reset_engine(engine); +} + +static struct intel_engine_cs *__active_engine(struct i915_request *rq) +{ + struct intel_engine_cs *engine, *locked; + + /* + * Serialise with __i915_request_submit() so that it sees + * is-banned?, or we know the request is already inflight. + */ + locked = READ_ONCE(rq->engine); + spin_lock_irq(&locked->active.lock); + while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { + spin_unlock(&locked->active.lock); + spin_lock(&engine->active.lock); + locked = engine; + } + + engine = NULL; + if (i915_request_is_active(rq) && !rq->fence.error) + engine = rq->engine; + + spin_unlock_irq(&locked->active.lock); + + return engine; +} + +static struct intel_engine_cs *active_engine(struct intel_context *ce) +{ + struct intel_engine_cs *engine = NULL; + struct i915_request *rq; + + if (!ce->timeline) + return NULL; + + rcu_read_lock(); + list_for_each_entry_reverse(rq, &ce->timeline->requests, link) { + if (i915_request_completed(rq)) + break; + + /* Check with the backend if the request is inflight */ + engine = __active_engine(rq); + if (engine) + break; + } + rcu_read_unlock(); + + return engine; +} + +static void kill_context(struct i915_gem_context *ctx) +{ + struct i915_gem_engines_iter it; + struct intel_context *ce; + + /* + * If we are already banned, it was due to a guilty request causing + * a reset and the entire context being evicted from the GPU. 
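__active_engine() above needs the lock of whichever engine the request currently claims, but the request can migrate between the READ_ONCE() and the lock acquisition, so the loop re-reads the pointer under the lock and chases it until it is stable. The same pattern in stand-alone form, using pthreads and C11 atomics over invented types:

#include <pthread.h>
#include <stdatomic.h>

struct engine {
        pthread_mutex_t lock;
};

struct request {
        _Atomic(struct engine *) engine;        /* may be rewritten concurrently */
};

/* Returns with the owning engine's lock held; the caller must unlock it. */
static struct engine *lock_request_engine(struct request *rq)
{
        struct engine *locked = atomic_load(&rq->engine);
        struct engine *now;

        pthread_mutex_lock(&locked->lock);
        while ((now = atomic_load(&rq->engine)) != locked) {
                /* The request moved while we waited; chase the new owner. */
                pthread_mutex_unlock(&locked->lock);
                pthread_mutex_lock(&now->lock);
                locked = now;
        }
        return locked;
}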
+ */ + if (i915_gem_context_is_banned(ctx)) + return; + + i915_gem_context_set_banned(ctx); /* - * This context will never again be assinged to HW, so we can - * reuse its ID for the next context. + * Map the user's engine back to the actual engines; one virtual + * engine will be mapped to multiple engines, and using ctx->engine[] + * the same engine may have multiple instances in the user's map. + * However, we only care about pending requests, so only include + * engines on which there are incomplete requests. */ - release_hw_id(ctx); + for_each_gem_engine(ce, __context_engines_static(ctx), it) { + struct intel_engine_cs *engine; + + /* + * Check the current active state of this context; if we + * are currently executing on the GPU we need to evict + * ourselves. On the other hand, if we haven't yet been + * submitted to the GPU or if everything is complete, + * we have nothing to do. + */ + engine = active_engine(ce); + + /* First attempt to gracefully cancel the context */ + if (engine && !__cancel_engine(engine)) + /* + * If we are unable to send a preemptive pulse to bump + * the context from the GPU, we have to resort to a full + * reset. We hope the collateral damage is worth it. + */ + __reset_context(ctx, engine); + } +} + +static void context_close(struct i915_gem_context *ctx) +{ + struct i915_address_space *vm; + + i915_gem_context_set_closed(ctx); + + mutex_lock(&ctx->mutex); + + vm = i915_gem_context_vm(ctx); + if (vm) + i915_vm_close(vm); + + ctx->file_priv = ERR_PTR(-EBADF); /* * The LUT uses the VMA as a backpointer to unref the object, @@ -400,9 +449,47 @@ static void context_close(struct i915_gem_context *ctx) lut_close(ctx); mutex_unlock(&ctx->mutex); + + /* + * If the user has disabled hangchecking, we can not be sure that + * the batches will ever complete after the context is closed, + * keeping the context and all resources pinned forever. So in this + * case we opt to forcibly kill off all remaining requests on + * context close. + */ + if (!i915_gem_context_is_persistent(ctx) || + !i915_modparams.enable_hangcheck) + kill_context(ctx); + i915_gem_context_put(ctx); } +static int __context_set_persistence(struct i915_gem_context *ctx, bool state) +{ + if (i915_gem_context_is_persistent(ctx) == state) + return 0; + + if (state) { + /* + * Only contexts that are short-lived [that will expire or be + * reset] are allowed to survive past termination. We require + * hangcheck to ensure that the persistent requests are healthy. + */ + if (!i915_modparams.enable_hangcheck) + return -EINVAL; + + i915_gem_context_set_persistence(ctx); + } else { + /* To cancel a context we use "preempt-to-idle" */ + if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) + return -ENODEV; + + i915_gem_context_clear_persistence(ctx); + } + + return 0; +} + static struct i915_gem_context * __create_context(struct drm_i915_private *i915) { @@ -416,7 +503,6 @@ __create_context(struct drm_i915_private *i915) return ERR_PTR(-ENOMEM); kref_init(&ctx->ref); - list_add_tail(&ctx->link, &i915->contexts.list); ctx->i915 = i915; ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL); mutex_init(&ctx->mutex); @@ -430,7 +516,6 @@ __create_context(struct drm_i915_private *i915) RCU_INIT_POINTER(ctx->engines, e); INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); - INIT_LIST_HEAD(&ctx->hw_id_link); /* NB: Mark all slices as needing a remap so that when the context first * loads it will restore whatever remap state already exists.
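The new persistence machinery above is driven from user space through the existing context set-param ioctl. Assuming the I915_CONTEXT_PARAM_PERSISTENCE define from the matching uapi change is present in <drm/i915_drm.h>, opting a context out of persistence would look roughly like this sketch (error handling omitted):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Ask the kernel to cancel this context's outstanding work when it is closed. */
static int disable_persistence(int drm_fd, __u32 ctx_id)
{
        struct drm_i915_gem_context_param p = {
                .ctx_id = ctx_id,
                .param  = I915_CONTEXT_PARAM_PERSISTENCE,
                .value  = 0,
        };

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}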
If there @@ -439,6 +524,7 @@ __create_context(struct drm_i915_private *i915) i915_gem_context_set_bannable(ctx); i915_gem_context_set_recoverable(ctx); + __context_set_persistence(ctx, true /* cgroup hook? */); for (i = 0; i < ARRAY_SIZE(ctx->hang_timestamp); i++) ctx->hang_timestamp[i] = jiffies - CONTEXT_FAST_HANG_JIFFIES; @@ -446,6 +532,10 @@ __create_context(struct drm_i915_private *i915) ctx->jump_whitelist = NULL; ctx->jump_whitelist_cmds = 0; + spin_lock(&i915->gem.contexts.lock); + list_add_tail(&ctx->link, &i915->gem.contexts.list); + spin_unlock(&i915->gem.contexts.lock); + return ctx; err_free: @@ -475,11 +565,11 @@ static void __apply_ppgtt(struct intel_context *ce, void *vm) static struct i915_address_space * __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) { - struct i915_address_space *old = ctx->vm; + struct i915_address_space *old = i915_gem_context_vm(ctx); GEM_BUG_ON(old && i915_vm_is_4lvl(vm) != i915_vm_is_4lvl(old)); - ctx->vm = i915_vm_get(vm); + rcu_assign_pointer(ctx->vm, i915_vm_open(vm)); context_apply_all(ctx, __apply_ppgtt, vm); return old; @@ -488,12 +578,12 @@ __set_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) static void __assign_ppgtt(struct i915_gem_context *ctx, struct i915_address_space *vm) { - if (vm == ctx->vm) + if (vm == rcu_access_pointer(ctx->vm)) return; vm = __set_ppgtt(ctx, vm); if (vm) - i915_vm_put(vm); + i915_vm_close(vm); } static void __set_timeline(struct intel_timeline **dst, @@ -520,27 +610,25 @@ static void __assign_timeline(struct i915_gem_context *ctx, } static struct i915_gem_context * -i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) +i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags) { struct i915_gem_context *ctx; - lockdep_assert_held(&dev_priv->drm.struct_mutex); - if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE && - !HAS_EXECLISTS(dev_priv)) + !HAS_EXECLISTS(i915)) return ERR_PTR(-EINVAL); - /* Reap the most stale context */ - contexts_free_first(dev_priv); + /* Reap the stale contexts */ + contexts_flush_free(&i915->gem.contexts); - ctx = __create_context(dev_priv); + ctx = __create_context(i915); if (IS_ERR(ctx)) return ctx; - if (HAS_FULL_PPGTT(dev_priv)) { + if (HAS_FULL_PPGTT(i915)) { struct i915_ppgtt *ppgtt; - ppgtt = i915_ppgtt_create(dev_priv); + ppgtt = i915_ppgtt_create(i915); if (IS_ERR(ppgtt)) { DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n", PTR_ERR(ppgtt)); @@ -548,14 +636,17 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags) return ERR_CAST(ppgtt); } + mutex_lock(&ctx->mutex); __assign_ppgtt(ctx, &ppgtt->vm); + mutex_unlock(&ctx->mutex); + i915_vm_put(&ppgtt->vm); } if (flags & I915_CONTEXT_CREATE_FLAGS_SINGLE_TIMELINE) { struct intel_timeline *timeline; - timeline = intel_timeline_create(&dev_priv->gt, NULL); + timeline = intel_timeline_create(&i915->gt, NULL); if (IS_ERR(timeline)) { context_close(ctx); return ERR_CAST(timeline); @@ -587,19 +678,13 @@ struct i915_gem_context * i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) { struct i915_gem_context *ctx; - int err; ctx = i915_gem_create_context(i915, 0); if (IS_ERR(ctx)) return ctx; - err = i915_gem_context_pin_hw_id(ctx); - if (err) { - destroy_kernel_context(&ctx); - return ERR_PTR(err); - } - i915_gem_context_clear_bannable(ctx); + i915_gem_context_set_persistence(ctx); ctx->sched.priority = I915_USER_PRIORITY(prio); GEM_BUG_ON(!i915_gem_context_is_kernel(ctx)); @@ -607,62 +692,42 @@ 
i915_gem_context_create_kernel(struct drm_i915_private *i915, int prio) return ctx; } -static void init_contexts(struct drm_i915_private *i915) +static void init_contexts(struct i915_gem_contexts *gc) { - mutex_init(&i915->contexts.mutex); - INIT_LIST_HEAD(&i915->contexts.list); - - /* Using the simple ida interface, the max is limited by sizeof(int) */ - BUILD_BUG_ON(MAX_CONTEXT_HW_ID > INT_MAX); - BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > INT_MAX); - ida_init(&i915->contexts.hw_ida); - INIT_LIST_HEAD(&i915->contexts.hw_id_list); + spin_lock_init(&gc->lock); + INIT_LIST_HEAD(&gc->list); - INIT_WORK(&i915->contexts.free_work, contexts_free_worker); - init_llist_head(&i915->contexts.free_list); + INIT_WORK(&gc->free_work, contexts_free_worker); + init_llist_head(&gc->free_list); } -int i915_gem_contexts_init(struct drm_i915_private *dev_priv) +int i915_gem_init_contexts(struct drm_i915_private *i915) { struct i915_gem_context *ctx; /* Reassure ourselves we are only called once */ - GEM_BUG_ON(dev_priv->kernel_context); + GEM_BUG_ON(i915->kernel_context); - init_contexts(dev_priv); + init_contexts(&i915->gem.contexts); /* lowest priority; idle task */ - ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN); + ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MIN); if (IS_ERR(ctx)) { DRM_ERROR("Failed to create default global context\n"); return PTR_ERR(ctx); } - /* - * For easy recognisablity, we want the kernel context to be 0 and then - * all user contexts will have non-zero hw_id. Kernel contexts are - * permanently pinned, so that we never suffer a stall and can - * use them from any allocation context (e.g. for evicting other - * contexts and from inside the shrinker). - */ - GEM_BUG_ON(ctx->hw_id); - GEM_BUG_ON(!atomic_read(&ctx->hw_id_pin_count)); - dev_priv->kernel_context = ctx; + i915->kernel_context = ctx; DRM_DEBUG_DRIVER("%s context support initialized\n", - DRIVER_CAPS(dev_priv)->has_logical_contexts ? + DRIVER_CAPS(i915)->has_logical_contexts ? 
"logical" : "fake"); return 0; } -void i915_gem_contexts_fini(struct drm_i915_private *i915) +void i915_gem_driver_release__contexts(struct drm_i915_private *i915) { - lockdep_assert_held(&i915->drm.struct_mutex); - destroy_kernel_context(&i915->kernel_context); - - /* Must free all deferred contexts (via flush_workqueue) first */ - GEM_BUG_ON(!list_empty(&i915->contexts.hw_id_list)); - ida_destroy(&i915->contexts.hw_ida); + flush_work(&i915->gem.contexts.free_work); } static int context_idr_cleanup(int id, void *p, void *data) @@ -680,11 +745,16 @@ static int vm_idr_cleanup(int id, void *p, void *data) static int gem_context_register(struct i915_gem_context *ctx, struct drm_i915_file_private *fpriv) { + struct i915_address_space *vm; int ret; ctx->file_priv = fpriv; - if (ctx->vm) - ctx->vm->file = fpriv; + + mutex_lock(&ctx->mutex); + vm = i915_gem_context_vm(ctx); + if (vm) + WRITE_ONCE(vm->file, fpriv); /* XXX */ + mutex_unlock(&ctx->mutex); ctx->pid = get_task_pid(current, PIDTYPE_PID); ctx->name = kasprintf(GFP_KERNEL, "%s[%d]", @@ -721,9 +791,7 @@ int i915_gem_context_open(struct drm_i915_private *i915, idr_init(&file_priv->context_idr); idr_init_base(&file_priv->vm_idr, 1); - mutex_lock(&i915->drm.struct_mutex); ctx = i915_gem_create_context(i915, 0); - mutex_unlock(&i915->drm.struct_mutex); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto err; @@ -751,6 +819,7 @@ err: void i915_gem_context_close(struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; + struct drm_i915_private *i915 = file_priv->dev_priv; idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL); idr_destroy(&file_priv->context_idr); @@ -759,6 +828,8 @@ void i915_gem_context_close(struct drm_file *file) idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL); idr_destroy(&file_priv->vm_idr); mutex_destroy(&file_priv->vm_idr_lock); + + contexts_flush_free(&i915->gem.contexts); } int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data, @@ -851,6 +922,7 @@ struct context_barrier_task { void *data; }; +__i915_active_call static void cb_retire(struct i915_active *base) { struct context_barrier_task *cb = container_of(base, typeof(*cb), base); @@ -870,20 +942,18 @@ static int context_barrier_task(struct i915_gem_context *ctx, void (*task)(void *data), void *data) { - struct drm_i915_private *i915 = ctx->i915; struct context_barrier_task *cb; struct i915_gem_engines_iter it; struct intel_context *ce; int err = 0; - lockdep_assert_held(&i915->drm.struct_mutex); GEM_BUG_ON(!task); cb = kmalloc(sizeof(*cb), GFP_KERNEL); if (!cb) return -ENOMEM; - i915_active_init(i915, &cb->base, NULL, cb_retire); + i915_active_init(&cb->base, NULL, cb_retire); err = i915_active_acquire(&cb->base); if (err) { kfree(cb); @@ -915,7 +985,7 @@ static int context_barrier_task(struct i915_gem_context *ctx, if (emit) err = emit(rq, data); if (err == 0) - err = i915_active_ref(&cb->base, rq->timeline, rq); + err = i915_active_add_request(&cb->base, rq); i915_request_add(rq); if (err) @@ -938,16 +1008,12 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv, struct i915_address_space *vm; int ret; - if (!ctx->vm) + if (!rcu_access_pointer(ctx->vm)) return -ENODEV; - /* XXX rcu acquire? 
*/ - ret = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); - if (ret) - return ret; - + rcu_read_lock(); vm = i915_vm_get(ctx->vm); - mutex_unlock(&ctx->i915->drm.struct_mutex); + rcu_read_unlock(); ret = mutex_lock_interruptible(&file_priv->vm_idr_lock); if (ret) @@ -958,7 +1024,7 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv, if (ret < 0) goto err_unlock; - i915_vm_get(vm); + i915_vm_open(vm); args->size = 0; args->value = ret; @@ -978,7 +1044,7 @@ static void set_ppgtt_barrier(void *data) if (INTEL_GEN(old->i915) < 8) gen6_ppgtt_unpin_all(i915_vm_to_ppgtt(old)); - i915_vm_put(old); + i915_vm_close(old); } static int emit_ppgtt_update(struct i915_request *rq, void *data) @@ -1008,12 +1074,18 @@ static int emit_ppgtt_update(struct i915_request *rq, void *data) intel_ring_advance(rq, cs); } else if (HAS_LOGICAL_RING_CONTEXTS(engine->i915)) { struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); + int err; + + /* Magic required to prevent forcewake errors! */ + err = engine->emit_flush(rq, EMIT_INVALIDATE); + if (err) + return err; cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); if (IS_ERR(cs)) return PTR_ERR(cs); - *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES); + *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; for (i = GEN8_3LVL_PDPES; i--; ) { const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); @@ -1050,34 +1122,34 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, if (args->size) return -EINVAL; - if (!ctx->vm) + if (!rcu_access_pointer(ctx->vm)) return -ENODEV; if (upper_32_bits(args->value)) return -ENOENT; - err = mutex_lock_interruptible(&file_priv->vm_idr_lock); - if (err) - return err; - + rcu_read_lock(); vm = idr_find(&file_priv->vm_idr, args->value); - if (vm) - i915_vm_get(vm); - mutex_unlock(&file_priv->vm_idr_lock); + if (vm && !kref_get_unless_zero(&vm->ref)) + vm = NULL; + rcu_read_unlock(); if (!vm) return -ENOENT; - err = mutex_lock_interruptible(&ctx->i915->drm.struct_mutex); + err = mutex_lock_interruptible(&ctx->mutex); if (err) goto out; - if (vm == ctx->vm) + if (i915_gem_context_is_closed(ctx)) { + err = -ENOENT; + goto unlock; + } + + if (vm == rcu_access_pointer(ctx->vm)) goto unlock; /* Teardown the existing obj:vma cache, it will have to be rebuilt. 
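get_ppgtt()/set_ppgtt() above now resolve the VM under rcu_read_lock() and only keep it when kref_get_unless_zero() succeeds, that is, when the object has not already dropped its last reference. The heart of that primitive is a compare-and-swap loop; a stand-alone C11 rendition, for illustration only:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the object is still live (refcount != 0). */
static bool ref_get_unless_zero(atomic_uint *refcount)
{
        unsigned int old = atomic_load(refcount);

        do {
                if (old == 0)
                        return false;   /* object already on its way to being freed */
        } while (!atomic_compare_exchange_weak(refcount, &old, old + 1));

        return true;
}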
*/ - mutex_lock(&ctx->mutex); lut_close(ctx); - mutex_unlock(&ctx->mutex); old = __set_ppgtt(ctx, vm); @@ -1092,13 +1164,12 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv, set_ppgtt_barrier, old); if (err) { - i915_vm_put(__set_ppgtt(ctx, old)); - i915_vm_put(old); + i915_vm_close(__set_ppgtt(ctx, old)); + i915_vm_close(old); } unlock: - mutex_unlock(&ctx->i915->drm.struct_mutex); - + mutex_unlock(&ctx->mutex); out: i915_vm_put(vm); return err; @@ -1117,7 +1188,7 @@ static int gen8_emit_rpcs_config(struct i915_request *rq, offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE + - (CTX_R_PWR_CLK_STATE + 1) * 4; + CTX_R_PWR_CLK_STATE * 4; *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = lower_32_bits(offset); @@ -1160,8 +1231,7 @@ gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu) } static int -__intel_context_reconfigure_sseu(struct intel_context *ce, - struct intel_sseu sseu) +intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu) { int ret; @@ -1185,23 +1255,6 @@ unlock: } static int -intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu) -{ - struct drm_i915_private *i915 = ce->engine->i915; - int ret; - - ret = mutex_lock_interruptible(&i915->drm.struct_mutex); - if (ret) - return ret; - - ret = __intel_context_reconfigure_sseu(ce, sseu); - - mutex_unlock(&i915->drm.struct_mutex); - - return ret; -} - -static int user_to_context_sseu(struct drm_i915_private *i915, const struct drm_i915_gem_context_param_sseu *user, struct intel_sseu *context) @@ -1743,6 +1796,16 @@ err_free: return err; } +static int +set_persistence(struct i915_gem_context *ctx, + const struct drm_i915_gem_context_param *args) +{ + if (args->size) + return -EINVAL; + + return __context_set_persistence(ctx, args->value); +} + static int ctx_setparam(struct drm_i915_file_private *fpriv, struct i915_gem_context *ctx, struct drm_i915_gem_context_param *args) @@ -1820,6 +1883,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv, ret = set_engines(ctx, args); break; + case I915_CONTEXT_PARAM_PERSISTENCE: + ret = set_persistence(ctx, args); + break; + case I915_CONTEXT_PARAM_BAN_PERIOD: default: ret = -EINVAL; @@ -1972,10 +2039,11 @@ static int clone_vm(struct i915_gem_context *dst, struct i915_gem_context *src) { struct i915_address_space *vm; + int err = 0; rcu_read_lock(); do { - vm = READ_ONCE(src->vm); + vm = rcu_dereference(src->vm); if (!vm) break; @@ -1997,7 +2065,7 @@ static int clone_vm(struct i915_gem_context *dst, * it cannot be reallocated elsewhere. 
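The clone_vm() loop above reads the source VM pointer under rcu_read_lock(), takes a reference, and (as the hunk goes on to show) re-checks that the published pointer is unchanged before trusting it, otherwise it drops the reference and retries. A schematic version of that retry loop, with plain atomics standing in for RCU, the ref_get_unless_zero() helper sketched earlier, and a deliberately trivial ref_put() that does not free:

#include <stdatomic.h>
#include <stddef.h>

struct vm {
        atomic_uint refcount;
        /* ... */
};

static void ref_put(atomic_uint *refcount)
{
        atomic_fetch_sub(refcount, 1);  /* a real put would free at zero */
}

static struct vm *get_stable_vm(_Atomic(struct vm *) *slot)
{
        struct vm *vm;

        for (;;) {
                vm = atomic_load(slot);
                if (!vm)
                        return NULL;
                if (!ref_get_unless_zero(&vm->refcount))
                        continue;       /* dying object; the slot should change soon */
                if (vm == atomic_load(slot))
                        return vm;      /* reference taken on the still-published pointer */
                ref_put(&vm->refcount); /* raced with a replacement; retry */
        }
}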
*/ - if (vm == READ_ONCE(src->vm)) + if (vm == rcu_access_pointer(src->vm)) break; i915_vm_put(vm); @@ -2005,11 +2073,16 @@ static int clone_vm(struct i915_gem_context *dst, rcu_read_unlock(); if (vm) { - __assign_ppgtt(dst, vm); + if (!mutex_lock_interruptible(&dst->mutex)) { + __assign_ppgtt(dst, vm); + mutex_unlock(&dst->mutex); + } else { + err = -EINTR; + } i915_vm_put(vm); } - return 0; + return err; } static int create_clone(struct i915_user_extension __user *ext, void *data) @@ -2099,12 +2172,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data, return -EIO; } - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - ext_data.ctx = i915_gem_create_context(i915, args->flags); - mutex_unlock(&dev->struct_mutex); if (IS_ERR(ext_data.ctx)) return PTR_ERR(ext_data.ctx); @@ -2231,12 +2299,12 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, case I915_CONTEXT_PARAM_GTT_SIZE: args->size = 0; - if (ctx->vm) - args->value = ctx->vm->total; - else if (to_i915(dev)->ggtt.alias) - args->value = to_i915(dev)->ggtt.alias->vm.total; + rcu_read_lock(); + if (rcu_access_pointer(ctx->vm)) + args->value = rcu_dereference(ctx->vm)->total; else args->value = to_i915(dev)->ggtt.vm.total; + rcu_read_unlock(); break; case I915_CONTEXT_PARAM_NO_ERROR_CAPTURE: @@ -2271,6 +2339,11 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data, ret = get_engines(ctx, args); break; + case I915_CONTEXT_PARAM_PERSISTENCE: + args->size = 0; + args->value = i915_gem_context_is_persistent(ctx); + break; + case I915_CONTEXT_PARAM_BAN_PERIOD: default: ret = -EINVAL; @@ -2302,7 +2375,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data, int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *i915 = to_i915(dev); struct drm_i915_reset_stats *args = data; struct i915_gem_context *ctx; int ret; @@ -2324,7 +2397,7 @@ int i915_gem_context_reset_stats_ioctl(struct drm_device *dev, */ if (capable(CAP_SYS_ADMIN)) - args->reset_count = i915_reset_count(&dev_priv->gpu_error); + args->reset_count = i915_reset_count(&i915->gpu_error); else args->reset_count = 0; @@ -2337,33 +2410,6 @@ out: return ret; } -int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx) -{ - struct drm_i915_private *i915 = ctx->i915; - int err = 0; - - mutex_lock(&i915->contexts.mutex); - - GEM_BUG_ON(i915_gem_context_is_closed(ctx)); - - if (list_empty(&ctx->hw_id_link)) { - GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count)); - - err = assign_hw_id(i915, &ctx->hw_id); - if (err) - goto out_unlock; - - list_add_tail(&ctx->hw_id_link, &i915->contexts.hw_id_list); - } - - GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == ~0u); - atomic_inc(&ctx->hw_id_pin_count); - -out_unlock: - mutex_unlock(&i915->contexts.mutex); - return err; -} - /* GEM context-engines iterator: for_each_gem_engine() */ struct intel_context * i915_gem_engines_iter_next(struct i915_gem_engines_iter *it) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h index 176978608b6f..18e50a769a6e 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h @@ -11,7 +11,9 @@ #include "gt/intel_context.h" +#include "i915_drv.h" #include "i915_gem.h" +#include "i915_gem_gtt.h" #include "i915_scheduler.h" #include "intel_device_info.h" @@ -74,6 +76,21 @@ static inline void 
i915_gem_context_clear_recoverable(struct i915_gem_context *c clear_bit(UCONTEXT_RECOVERABLE, &ctx->user_flags); } +static inline bool i915_gem_context_is_persistent(const struct i915_gem_context *ctx) +{ + return test_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags); +} + +static inline void i915_gem_context_set_persistence(struct i915_gem_context *ctx) +{ + set_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags); +} + +static inline void i915_gem_context_clear_persistence(struct i915_gem_context *ctx) +{ + clear_bit(UCONTEXT_PERSISTENCE, &ctx->user_flags); +} + static inline bool i915_gem_context_is_banned(const struct i915_gem_context *ctx) { return test_bit(CONTEXT_BANNED, &ctx->flags); @@ -112,19 +129,22 @@ i915_gem_context_clear_user_engines(struct i915_gem_context *ctx) clear_bit(CONTEXT_USER_ENGINES, &ctx->flags); } -int __i915_gem_context_pin_hw_id(struct i915_gem_context *ctx); -static inline int i915_gem_context_pin_hw_id(struct i915_gem_context *ctx) +static inline bool +i915_gem_context_nopreempt(const struct i915_gem_context *ctx) { - if (atomic_inc_not_zero(&ctx->hw_id_pin_count)) - return 0; + return test_bit(CONTEXT_NOPREEMPT, &ctx->flags); +} - return __i915_gem_context_pin_hw_id(ctx); +static inline void +i915_gem_context_set_nopreempt(struct i915_gem_context *ctx) +{ + set_bit(CONTEXT_NOPREEMPT, &ctx->flags); } -static inline void i915_gem_context_unpin_hw_id(struct i915_gem_context *ctx) +static inline void +i915_gem_context_clear_nopreempt(struct i915_gem_context *ctx) { - GEM_BUG_ON(atomic_read(&ctx->hw_id_pin_count) == 0u); - atomic_dec(&ctx->hw_id_pin_count); + clear_bit(CONTEXT_NOPREEMPT, &ctx->flags); } static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx) @@ -133,8 +153,8 @@ static inline bool i915_gem_context_is_kernel(struct i915_gem_context *ctx) } /* i915_gem_context.c */ -int __must_check i915_gem_contexts_init(struct drm_i915_private *dev_priv); -void i915_gem_contexts_fini(struct drm_i915_private *dev_priv); +int __must_check i915_gem_init_contexts(struct drm_i915_private *i915); +void i915_gem_driver_release__contexts(struct drm_i915_private *i915); int i915_gem_context_open(struct drm_i915_private *i915, struct drm_file *file); @@ -173,6 +193,27 @@ static inline void i915_gem_context_put(struct i915_gem_context *ctx) kref_put(&ctx->ref, i915_gem_context_release); } +static inline struct i915_address_space * +i915_gem_context_vm(struct i915_gem_context *ctx) +{ + return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex)); +} + +static inline struct i915_address_space * +i915_gem_context_get_vm_rcu(struct i915_gem_context *ctx) +{ + struct i915_address_space *vm; + + rcu_read_lock(); + vm = rcu_dereference(ctx->vm); + if (!vm) + vm = &ctx->i915->ggtt.vm; + vm = i915_vm_get(vm); + rcu_read_unlock(); + + return vm; +} + static inline struct i915_gem_engines * i915_gem_context_engines(struct i915_gem_context *ctx) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h index 00537b9d7006..3870dd5daaa0 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h @@ -88,7 +88,7 @@ struct i915_gem_context { * In other modes, this is a NULL pointer with the expectation that * the caller uses the shared global GTT. 
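The new persistence and no-preempt accessors above follow the usual kernel idiom of keeping each boolean property as one bit in a flags word behind small test/set/clear wrappers (test_bit()/set_bit()/clear_bit() in the patch). The same shape outside the kernel, with invented names and without the atomic bitops:

#include <stdbool.h>

#define UFLAG_PERSISTENCE 4     /* mirrors the UCONTEXT_PERSISTENCE bit index */

struct ctx_flags {
        unsigned long user_flags;
};

static inline bool ctx_is_persistent(const struct ctx_flags *c)
{
        return c->user_flags & (1UL << UFLAG_PERSISTENCE);
}

static inline void ctx_set_persistence(struct ctx_flags *c)
{
        c->user_flags |= 1UL << UFLAG_PERSISTENCE;
}

static inline void ctx_clear_persistence(struct ctx_flags *c)
{
        c->user_flags &= ~(1UL << UFLAG_PERSISTENCE);
}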
*/ - struct i915_address_space *vm; + struct i915_address_space __rcu *vm; /** * @pid: process id of creator @@ -137,6 +137,7 @@ struct i915_gem_context { #define UCONTEXT_NO_ERROR_CAPTURE 1 #define UCONTEXT_BANNABLE 2 #define UCONTEXT_RECOVERABLE 3 +#define UCONTEXT_PERSISTENCE 4 /** * @flags: small set of booleans @@ -146,24 +147,7 @@ struct i915_gem_context { #define CONTEXT_CLOSED 1 #define CONTEXT_FORCE_SINGLE_SUBMISSION 2 #define CONTEXT_USER_ENGINES 3 - - /** - * @hw_id: - unique identifier for the context - * - * The hardware needs to uniquely identify the context for a few - * functions like fault reporting, PASID, scheduling. The - * &drm_i915_private.context_hw_ida is used to assign a unqiue - * id for the lifetime of the context. - * - * @hw_id_pin_count: - number of times this context had been pinned - * for use (should be, at most, once per engine). - * - * @hw_id_link: - all contexts with an assigned id are tracked - * for possible repossession. - */ - unsigned int hw_id; - atomic_t hw_id_pin_count; - struct list_head hw_id_link; +#define CONTEXT_NOPREEMPT 4 struct mutex mutex; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index 96ce95c8ac5a..eaea49d08eb5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -256,6 +256,7 @@ static const struct drm_i915_gem_object_ops i915_gem_object_dmabuf_ops = { struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf) { + static struct lock_class_key lock_class; struct dma_buf_attachment *attach; struct drm_i915_gem_object *obj; int ret; @@ -287,7 +288,7 @@ struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev, } drm_gem_private_object_init(dev, &obj->base, dma_buf->size); - i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops); + i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops, &lock_class); obj->base.import_attach = attach; obj->base.resv = dma_buf->resv; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index 9c58e8fac1d9..9937b4c341f1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -27,7 +27,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj) void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) { - if (!READ_ONCE(obj->pin_global)) + if (!i915_gem_object_is_framebuffer(obj)) return; i915_gem_object_lock(obj); @@ -288,14 +288,21 @@ restart: if (!drm_mm_node_allocated(&vma->node)) continue; - ret = i915_vma_bind(vma, cache_level, PIN_UPDATE); + /* Wait for an earlier async bind, need to rewrite it */ + ret = i915_vma_sync(vma); + if (ret) + return ret; + + ret = i915_vma_bind(vma, cache_level, PIN_UPDATE, NULL); if (ret) return ret; } } - list_for_each_entry(vma, &obj->vma.list, obj_link) - vma->node.color = cache_level; + list_for_each_entry(vma, &obj->vma.list, obj_link) { + if (i915_vm_has_cache_coloring(vma->vm)) + vma->node.color = cache_level; + } i915_gem_object_set_cache_coherency(obj, cache_level); obj->cache_dirty = true; /* Always invalidate stale cachelines */ @@ -389,16 +396,11 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data, if (ret) goto out; - ret = mutex_lock_interruptible(&i915->drm.struct_mutex); - if (ret) - goto out; - ret = i915_gem_object_lock_interruptible(obj); if (ret == 0) { ret = i915_gem_object_set_cache_level(obj, level); i915_gem_object_unlock(obj); } - 
mutex_unlock(&i915->drm.struct_mutex); out: i915_gem_object_put(obj); @@ -422,12 +424,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, assert_object_held(obj); - /* Mark the global pin early so that we account for the - * display coherency whilst setting up the cache domains. - */ - obj->pin_global++; - - /* The display engine is not coherent with the LLC cache on gen6. As + /* + * The display engine is not coherent with the LLC cache on gen6. As * a result, we make sure that the pinning that is about to occur is * done with uncached PTEs. This is lowest common denominator for all * chipsets. @@ -439,12 +437,11 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, ret = i915_gem_object_set_cache_level(obj, HAS_WT(to_i915(obj->base.dev)) ? I915_CACHE_WT : I915_CACHE_NONE); - if (ret) { - vma = ERR_PTR(ret); - goto err_unpin_global; - } + if (ret) + return ERR_PTR(ret); - /* As the user may map the buffer once pinned in the display plane + /* + * As the user may map the buffer once pinned in the display plane * (e.g. libkms for the bootup splash), we have to ensure that we * always use map_and_fenceable for all scanout buffers. However, * it may simply be too big to fit into mappable, in which case @@ -461,22 +458,19 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, if (IS_ERR(vma)) vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); if (IS_ERR(vma)) - goto err_unpin_global; + return vma; vma->display_alignment = max_t(u64, vma->display_alignment, alignment); __i915_gem_object_flush_for_display(obj); - /* It should now be out of any other write domains, and we can update + /* + * It should now be out of any other write domains, and we can update * the domain values for our changes. 
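Although i915_gem_set_caching_ioctl() above no longer takes struct_mutex, the user-visible interface is untouched: callers still drive it through DRM_IOCTL_I915_GEM_SET_CACHING. A minimal sketch of such a call (error handling omitted):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_bo_caching(int drm_fd, __u32 handle, __u32 caching)
{
        struct drm_i915_gem_caching arg = {
                .handle  = handle,
                .caching = caching,     /* e.g. I915_CACHING_NONE or I915_CACHING_CACHED */
        };

        return ioctl(drm_fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
}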
*/ obj->read_domains |= I915_GEM_DOMAIN_GTT; return vma; - -err_unpin_global: - obj->pin_global--; - return vma; } static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) @@ -491,6 +485,7 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) if (!drm_mm_node_allocated(&vma->node)) continue; + GEM_BUG_ON(vma->vm != &i915->ggtt.vm); list_move_tail(&vma->vm_link, &vma->vm->bound_list); } mutex_unlock(&i915->ggtt.vm.mutex); @@ -500,7 +495,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) spin_lock_irqsave(&i915->mm.obj_lock, flags); - if (obj->mm.madv == I915_MADV_WILLNEED) + if (obj->mm.madv == I915_MADV_WILLNEED && + !atomic_read(&obj->mm.shrink_pin)) list_move_tail(&obj->mm.link, &i915->mm.shrink_list); spin_unlock_irqrestore(&i915->mm.obj_lock, flags); @@ -514,12 +510,6 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) assert_object_held(obj); - if (WARN_ON(obj->pin_global == 0)) - return; - - if (--obj->pin_global == 0) - vma->display_alignment = I915_GTT_MIN_ALIGNMENT; - /* Bump the LRU to try and avoid premature eviction whilst flipping */ i915_gem_object_bump_inactive_ggtt(obj); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index e635e1e5f4d3..f0998f1225af 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -19,6 +19,7 @@ #include "gt/intel_engine_pool.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_ring.h" #include "i915_drv.h" #include "i915_gem_clflush.h" @@ -252,6 +253,7 @@ struct i915_execbuffer { bool has_fence : 1; bool needs_unfenced : 1; + struct intel_context *ce; struct i915_request *rq; u32 *rq_cmd; unsigned int rq_size; @@ -699,7 +701,9 @@ static int eb_reserve(struct i915_execbuffer *eb) case 1: /* Too fragmented, unbind everything and retry */ + mutex_lock(&eb->context->vm->mutex); err = i915_gem_evict_vm(eb->context->vm); + mutex_unlock(&eb->context->vm->mutex); if (err) return err; break; @@ -727,7 +731,7 @@ static int eb_select_context(struct i915_execbuffer *eb) return -ENOENT; eb->gem_context = ctx; - if (ctx->vm) + if (rcu_access_pointer(ctx->vm)) eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT; eb->context_flags = 0; @@ -882,6 +886,9 @@ static void eb_destroy(const struct i915_execbuffer *eb) { GEM_BUG_ON(eb->reloc_cache.rq); + if (eb->reloc_cache.ce) + intel_context_put(eb->reloc_cache.ce); + if (eb->lut_size > 0) kfree(eb->buckets); } @@ -904,7 +911,8 @@ static void reloc_cache_init(struct reloc_cache *cache, cache->use_64bit_reloc = HAS_64BIT_RELOC(i915); cache->has_fence = cache->gen < 4; cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment; - cache->node.allocated = false; + cache->node.flags = 0; + cache->ce = NULL; cache->rq = NULL; cache->rq_size = 0; } @@ -965,11 +973,13 @@ static void reloc_cache_reset(struct reloc_cache *cache) intel_gt_flush_ggtt_writes(ggtt->vm.gt); io_mapping_unmap_atomic((void __iomem *)vaddr); - if (cache->node.allocated) { + if (drm_mm_node_allocated(&cache->node)) { ggtt->vm.clear_range(&ggtt->vm, cache->node.start, cache->node.size); + mutex_lock(&ggtt->vm.mutex); drm_mm_remove_node(&cache->node); + mutex_unlock(&ggtt->vm.mutex); } else { i915_vma_unpin((struct i915_vma *)cache->node.mm); } @@ -1044,11 +1054,13 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, PIN_NOEVICT); if (IS_ERR(vma)) { memset(&cache->node, 0, sizeof(cache->node)); + 
mutex_lock(&ggtt->vm.mutex); err = drm_mm_insert_node_in_range (&ggtt->vm.mm, &cache->node, PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, 0, ggtt->mappable_end, DRM_MM_INSERT_LOW); + mutex_unlock(&ggtt->vm.mutex); if (err) /* no inactive aperture space, use cpu reloc */ return NULL; } else { @@ -1058,7 +1070,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj, } offset = cache->node.start; - if (cache->node.allocated) { + if (drm_mm_node_allocated(&cache->node)) { ggtt->vm.insert_page(&ggtt->vm, i915_gem_object_get_dma_address(obj, page), offset, I915_CACHE_NONE, 0); @@ -1147,7 +1159,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, u32 *cmd; int err; - pool = intel_engine_pool_get(&eb->engine->pool, PAGE_SIZE); + pool = intel_engine_get_pool(eb->engine, PAGE_SIZE); if (IS_ERR(pool)) return PTR_ERR(pool); @@ -1170,7 +1182,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb, if (err) goto err_unmap; - rq = i915_request_create(eb->context); + rq = intel_context_create_request(cache->ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_unpin; @@ -1241,6 +1253,29 @@ static u32 *reloc_gpu(struct i915_execbuffer *eb, if (!intel_engine_can_store_dword(eb->engine)) return ERR_PTR(-ENODEV); + if (!cache->ce) { + struct intel_context *ce; + + /* + * The CS pre-parser can pre-fetch commands across + * memory sync points and starting gen12 it is able to + * pre-fetch across BB_START and BB_END boundaries + * (within the same context). We therefore use a + * separate context gen12+ to guarantee that the reloc + * writes land before the parser gets to the target + * memory location. + */ + if (cache->gen >= 12) + ce = intel_context_create(eb->context->gem_context, + eb->engine); + else + ce = intel_context_get(eb->context); + if (IS_ERR(ce)) + return ERR_CAST(ce); + + cache->ce = ce; + } + err = __reloc_gpu_alloc(eb, vma, len); if (unlikely(err)) return ERR_PTR(err); @@ -1390,7 +1425,7 @@ eb_relocate_entry(struct i915_execbuffer *eb, if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION && IS_GEN(eb->i915, 6)) { err = i915_vma_bind(target, target->obj->cache_level, - PIN_GLOBAL); + PIN_GLOBAL, NULL); if (WARN_ONCE(err, "Unexpected failure to bind target VMA!")) return err; @@ -1992,7 +2027,7 @@ static struct i915_vma *eb_parse(struct i915_execbuffer *eb) u64 shadow_batch_start; int err; - pool = intel_engine_pool_get(&eb->engine->pool, eb->batch_len); + pool = intel_engine_get_pool(eb->engine, eb->batch_len); if (IS_ERR(pool)) return ERR_CAST(pool); @@ -2099,6 +2134,9 @@ static int eb_submit(struct i915_execbuffer *eb) if (err) return err; + if (i915_gem_context_nopreempt(eb->gem_context)) + eb->request->flags |= I915_REQUEST_NOPREEMPT; + return 0; } @@ -2168,35 +2206,6 @@ static struct i915_request *eb_throttle(struct intel_context *ce) return i915_request_get(rq); } -static int -__eb_pin_context(struct i915_execbuffer *eb, struct intel_context *ce) -{ - int err; - - if (likely(atomic_inc_not_zero(&ce->pin_count))) - return 0; - - err = mutex_lock_interruptible(&eb->i915->drm.struct_mutex); - if (err) - return err; - - err = __intel_context_do_pin(ce); - mutex_unlock(&eb->i915->drm.struct_mutex); - - return err; -} - -static void -__eb_unpin_context(struct i915_execbuffer *eb, struct intel_context *ce) -{ - if (likely(atomic_add_unless(&ce->pin_count, -1, 1))) - return; - - mutex_lock(&eb->i915->drm.struct_mutex); - intel_context_unpin(ce); - mutex_unlock(&eb->i915->drm.struct_mutex); -} - static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce) { struct 
intel_timeline *tl; @@ -2216,7 +2225,7 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce) * GGTT space, so do this first before we reserve a seqno for * ourselves. */ - err = __eb_pin_context(eb, ce); + err = intel_context_pin(ce); if (err) return err; @@ -2260,7 +2269,7 @@ err_exit: intel_context_exit(ce); intel_context_timeline_unlock(tl); err_unpin: - __eb_unpin_context(eb, ce); + intel_context_unpin(ce); return err; } @@ -2273,7 +2282,7 @@ static void eb_unpin_engine(struct i915_execbuffer *eb) intel_context_exit(ce); mutex_unlock(&tl->mutex); - __eb_unpin_context(eb, ce); + intel_context_unpin(ce); } static unsigned int diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index 0c41e04ab8fa..9cfb0e41ff06 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -117,13 +117,6 @@ create_st: goto err; } - /* Mark the pages as dontneed whilst they are still pinned. As soon - * as they are unpinned they are allowed to be reaped by the shrinker, - * and the caller is expected to repopulate - the contents of this - * object are only valid whilst active and pinned. - */ - obj->mm.madv = I915_MADV_DONTNEED; - __i915_gem_object_set_pages(obj, st, sg_page_sizes); return 0; @@ -143,7 +136,6 @@ static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj, internal_free_pages(pages); obj->mm.dirty = false; - obj->mm.madv = I915_MADV_WILLNEED; } static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = { @@ -172,6 +164,7 @@ struct drm_i915_gem_object * i915_gem_object_create_internal(struct drm_i915_private *i915, phys_addr_t size) { + static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; unsigned int cache_level; @@ -186,7 +179,16 @@ i915_gem_object_create_internal(struct drm_i915_private *i915, return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &i915_gem_object_internal_ops); + i915_gem_object_init(obj, &i915_gem_object_internal_ops, &lock_class); + + /* + * Mark the object as volatile, such that the pages are marked as + * dontneed whilst they are still pinned. As soon as they are unpinned + * they are allowed to be reaped by the shrinker, and the caller is + * expected to repopulate - the contents of this object are only valid + * whilst active and pinned. + */ + i915_gem_object_set_volatile(obj); obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.c b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c new file mode 100644 index 000000000000..0e2bf6b7e143 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "intel_memory_region.h" +#include "gem/i915_gem_region.h" +#include "gem/i915_gem_lmem.h" +#include "i915_drv.h" + +const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops = { + .flags = I915_GEM_OBJECT_HAS_IOMEM, + + .get_pages = i915_gem_object_get_pages_buddy, + .put_pages = i915_gem_object_put_pages_buddy, + .release = i915_gem_object_release_memory_region, +}; + +/* XXX: Time to vfunc your life up? 
*/ +void __iomem * +i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj, + unsigned long n) +{ + resource_size_t offset; + + offset = i915_gem_object_get_dma_address(obj, n); + offset -= obj->mm.region->region.start; + + return io_mapping_map_wc(&obj->mm.region->iomap, offset, PAGE_SIZE); +} + +void __iomem * +i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj, + unsigned long n) +{ + resource_size_t offset; + + offset = i915_gem_object_get_dma_address(obj, n); + offset -= obj->mm.region->region.start; + + return io_mapping_map_atomic_wc(&obj->mm.region->iomap, offset); +} + +void __iomem * +i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, + unsigned long n, + unsigned long size) +{ + resource_size_t offset; + + GEM_BUG_ON(!i915_gem_object_is_contiguous(obj)); + + offset = i915_gem_object_get_dma_address(obj, n); + offset -= obj->mm.region->region.start; + + return io_mapping_map_wc(&obj->mm.region->iomap, offset, size); +} + +bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj) +{ + return obj->ops == &i915_gem_lmem_obj_ops; +} + +struct drm_i915_gem_object * +i915_gem_object_create_lmem(struct drm_i915_private *i915, + resource_size_t size, + unsigned int flags) +{ + return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM], + size, flags); +} + +struct drm_i915_gem_object * +__i915_gem_lmem_object_create(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags) +{ + static struct lock_class_key lock_class; + struct drm_i915_private *i915 = mem->i915; + struct drm_i915_gem_object *obj; + + if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size) + return ERR_PTR(-E2BIG); + + obj = i915_gem_object_alloc(); + if (!obj) + return ERR_PTR(-ENOMEM); + + drm_gem_private_object_init(&i915->drm, &obj->base, size); + i915_gem_object_init(obj, &i915_gem_lmem_obj_ops, &lock_class); + + obj->read_domains = I915_GEM_DOMAIN_WC | I915_GEM_DOMAIN_GTT; + + i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); + + i915_gem_object_init_memory_region(obj, mem, flags); + + return obj; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_lmem.h b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h new file mode 100644 index 000000000000..7c176b8b7d2f --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_lmem.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_GEM_LMEM_H +#define __I915_GEM_LMEM_H + +#include <linux/types.h> + +struct drm_i915_private; +struct drm_i915_gem_object; +struct intel_memory_region; + +extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops; + +void __iomem *i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj, + unsigned long n, unsigned long size); +void __iomem *i915_gem_object_lmem_io_map_page(struct drm_i915_gem_object *obj, + unsigned long n); +void __iomem * +i915_gem_object_lmem_io_map_page_atomic(struct drm_i915_gem_object *obj, + unsigned long n); + +bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj); + +struct drm_i915_gem_object * +i915_gem_object_create_lmem(struct drm_i915_private *i915, + resource_size_t size, + unsigned int flags); + +struct drm_i915_gem_object * +__i915_gem_lmem_object_create(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags); + +#endif /* !__I915_GEM_LMEM_H */ diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 05289edbafe3..e3002849844b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ 
b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -8,6 +8,7 @@ #include <linux/sizes.h> #include "gt/intel_gt.h" +#include "gt/intel_gt_requests.h" #include "i915_drv.h" #include "i915_gem_gtt.h" @@ -249,16 +250,6 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) if (ret) goto err_rpm; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto err_reset; - - /* Access to snoopable pages through the GTT is incoherent. */ - if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) { - ret = -EFAULT; - goto err_unlock; - } - /* Now pin it into the GTT as needed */ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE | @@ -285,10 +276,19 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) view.type = I915_GGTT_VIEW_PARTIAL; vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags); } + + /* The entire mappable GGTT is pinned? Unexpected! */ + GEM_BUG_ON(vma == ERR_PTR(-ENOSPC)); } if (IS_ERR(vma)) { ret = PTR_ERR(vma); - goto err_unlock; + goto err_reset; + } + + /* Access to snoopable pages through the GTT is incoherent. */ + if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(i915)) { + ret = -EFAULT; + goto err_unpin; } ret = i915_vma_pin_fence(vma); @@ -312,7 +312,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) list_add(&obj->userfault_link, &i915->ggtt.userfault_list); mutex_unlock(&i915->ggtt.vm.mutex); - if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) + if (IS_ACTIVE(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)) intel_wakeref_auto(&i915->ggtt.userfault_wakeref, msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)); @@ -326,8 +326,6 @@ err_fence: i915_vma_unpin_fence(vma); err_unpin: __i915_vma_unpin(vma); -err_unlock: - mutex_unlock(&dev->struct_mutex); err_reset: intel_gt_reset_unlock(ggtt->vm.gt, srcu); err_rpm: @@ -335,23 +333,20 @@ err_rpm: i915_gem_object_unpin_pages(obj); err: switch (ret) { - case -EIO: - /* - * We eat errors when the gpu is terminally wedged to avoid - * userspace unduly crashing (gl has no provisions for mmaps to - * fail). But any other -EIO isn't ours (e.g. swap in failure) - * and so needs to be reported. - */ - if (!intel_gt_is_wedged(ggtt->vm.gt)) - return VM_FAULT_SIGBUS; - /* else, fall through */ - case -EAGAIN: - /* - * EAGAIN means the gpu is hung and we'll wait for the error - * handler to reset everything when re-faulting in - * i915_mutex_lock_interruptible. - */ + default: + WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret); + /* fallthrough */ + case -EIO: /* shmemfs failure from swap device */ + case -EFAULT: /* purged object */ + case -ENODEV: /* bad object, how did you get here! */ + return VM_FAULT_SIGBUS; + + case -ENOSPC: /* shmemfs allocation failure */ + case -ENOMEM: /* our allocation failure */ + return VM_FAULT_OOM; + case 0: + case -EAGAIN: case -ERESTARTSYS: case -EINTR: case -EBUSY: @@ -360,15 +355,6 @@ err: * already did the job. */ return VM_FAULT_NOPAGE; - case -ENOMEM: - return VM_FAULT_OOM; - case -ENOSPC: - case -EFAULT: - case -ENODEV: /* bad object, how did you get here! 
*/ - return VM_FAULT_SIGBUS; - default: - WARN_ONCE(ret, "unhandled error in %s: %i\n", __func__, ret); - return VM_FAULT_SIGBUS; } } @@ -439,6 +425,7 @@ out: static int create_mmap_offset(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); + struct intel_gt *gt = &i915->gt; int err; err = drm_gem_create_mmap_offset(&obj->base); @@ -446,21 +433,12 @@ static int create_mmap_offset(struct drm_i915_gem_object *obj) return 0; /* Attempt to reap some mmap space from dead objects */ - do { - err = i915_gem_wait_for_idle(i915, - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT); - if (err) - break; - - i915_gem_drain_freed_objects(i915); - err = drm_gem_create_mmap_offset(&obj->base); - if (!err) - break; - - } while (flush_delayed_work(&i915->gem.retire_work)); + err = intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT); + if (err) + return err; - return err; + i915_gem_drain_freed_objects(i915); + return drm_gem_create_mmap_offset(&obj->base); } int diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index d7855dc5a5c5..a50296cce0d8 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -47,9 +47,10 @@ void i915_gem_object_free(struct drm_i915_gem_object *obj) } void i915_gem_object_init(struct drm_i915_gem_object *obj, - const struct drm_i915_gem_object_ops *ops) + const struct drm_i915_gem_object_ops *ops, + struct lock_class_key *key) { - mutex_init(&obj->mm.lock); + __mutex_init(&obj->mm.lock, "obj->mm.lock", key); spin_lock_init(&obj->vma.lock); INIT_LIST_HEAD(&obj->vma.list); @@ -155,21 +156,30 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, wakeref = intel_runtime_pm_get(&i915->runtime_pm); llist_for_each_entry_safe(obj, on, freed, freed) { - struct i915_vma *vma, *vn; - trace_i915_gem_object_destroy(obj); - mutex_lock(&i915->drm.struct_mutex); - - list_for_each_entry_safe(vma, vn, &obj->vma.list, obj_link) { - GEM_BUG_ON(i915_vma_is_active(vma)); - vma->flags &= ~I915_VMA_PIN_MASK; - i915_vma_destroy(vma); + if (!list_empty(&obj->vma.list)) { + struct i915_vma *vma; + + /* + * Note that the vma keeps an object reference while + * it is active, so it *should* not sleep while we + * destroy it. Our debug code, however, insists it *might*. + * For the moment, play along.
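The rewritten error path in i915_gem_fault() above collapses the errno handling into a single switch whose default case is a loud SIGBUS. Its overall shape, reduced to a stand-alone function with an invented enum, positive errno values, and ERESTARTSYS defined locally since it is not exported to user space:

#include <errno.h>

#ifndef ERESTARTSYS
#define ERESTARTSYS 512         /* kernel-internal value, defined here for the sketch */
#endif

enum fault_status { FAULT_NOPAGE, FAULT_OOM, FAULT_SIGBUS };

static enum fault_status errno_to_fault(int err)
{
        switch (err) {
        default:                /* anything unexpected is fatal for the mapping */
        case EIO:               /* backing-store failure */
        case EFAULT:            /* purged object */
        case ENODEV:            /* bad object */
                return FAULT_SIGBUS;

        case ENOSPC:            /* backing-store allocation failure */
        case ENOMEM:            /* our own allocation failure */
                return FAULT_OOM;

        case 0:                 /* retry: the reset/signal already did the work */
        case EAGAIN:
        case ERESTARTSYS:
        case EINTR:
        case EBUSY:
                return FAULT_NOPAGE;
        }
}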
+ */ + spin_lock(&obj->vma.lock); + while ((vma = list_first_entry_or_null(&obj->vma.list, + struct i915_vma, + obj_link))) { + GEM_BUG_ON(vma->obj != obj); + spin_unlock(&obj->vma.lock); + + i915_vma_destroy(vma); + + spin_lock(&obj->vma.lock); + } + spin_unlock(&obj->vma.lock); } - GEM_BUG_ON(!list_empty(&obj->vma.list)); - GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma.tree)); - - mutex_unlock(&i915->drm.struct_mutex); GEM_BUG_ON(atomic_read(&obj->bind_count)); GEM_BUG_ON(obj->userfault_count); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index ddf3605bea8e..458cd51331f1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -23,12 +23,14 @@ struct drm_i915_gem_object *i915_gem_object_alloc(void); void i915_gem_object_free(struct drm_i915_gem_object *obj); void i915_gem_object_init(struct drm_i915_gem_object *obj, - const struct drm_i915_gem_object_ops *ops); + const struct drm_i915_gem_object_ops *ops, + struct lock_class_key *key); struct drm_i915_gem_object * -i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size); +i915_gem_object_create_shmem(struct drm_i915_private *i915, + resource_size_t size); struct drm_i915_gem_object * i915_gem_object_create_shmem_from_data(struct drm_i915_private *i915, - const void *data, size_t size); + const void *data, resource_size_t size); extern const struct drm_i915_gem_object_ops i915_gem_shmem_ops; void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, @@ -106,6 +108,11 @@ static inline void i915_gem_object_lock(struct drm_i915_gem_object *obj) dma_resv_lock(obj->base.resv, NULL); } +static inline bool i915_gem_object_trylock(struct drm_i915_gem_object *obj) +{ + return dma_resv_trylock(obj->base.resv); +} + static inline int i915_gem_object_lock_interruptible(struct drm_i915_gem_object *obj) { @@ -135,33 +142,58 @@ i915_gem_object_is_readonly(const struct drm_i915_gem_object *obj) } static inline bool +i915_gem_object_is_contiguous(const struct drm_i915_gem_object *obj) +{ + return obj->flags & I915_BO_ALLOC_CONTIGUOUS; +} + +static inline bool +i915_gem_object_is_volatile(const struct drm_i915_gem_object *obj) +{ + return obj->flags & I915_BO_ALLOC_VOLATILE; +} + +static inline void +i915_gem_object_set_volatile(struct drm_i915_gem_object *obj) +{ + obj->flags |= I915_BO_ALLOC_VOLATILE; +} + +static inline bool +i915_gem_object_type_has(const struct drm_i915_gem_object *obj, + unsigned long flags) +{ + return obj->ops->flags & flags; +} + +static inline bool i915_gem_object_has_struct_page(const struct drm_i915_gem_object *obj) { - return obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE; + return i915_gem_object_type_has(obj, I915_GEM_OBJECT_HAS_STRUCT_PAGE); } static inline bool i915_gem_object_is_shrinkable(const struct drm_i915_gem_object *obj) { - return obj->ops->flags & I915_GEM_OBJECT_IS_SHRINKABLE; + return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_SHRINKABLE); } static inline bool i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj) { - return obj->ops->flags & I915_GEM_OBJECT_IS_PROXY; + return i915_gem_object_type_has(obj, I915_GEM_OBJECT_IS_PROXY); } static inline bool i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj) { - return obj->ops->flags & I915_GEM_OBJECT_NO_GGTT; + return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT); } static inline bool i915_gem_object_needs_async_cancel(const struct drm_i915_gem_object *obj) { - return obj->ops->flags & 
I915_GEM_OBJECT_ASYNC_CANCEL; + return i915_gem_object_type_has(obj, I915_GEM_OBJECT_ASYNC_CANCEL); } static inline bool @@ -412,7 +444,8 @@ static inline bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj) if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) return true; - return obj->pin_global; /* currently in use by HW, keep flushed */ + /* Currently in use by HW (display engine)? Keep flushed. */ + return i915_gem_object_is_framebuffer(obj); } static inline void __start_cpu_write(struct drm_i915_gem_object *obj) @@ -429,6 +462,5 @@ int i915_gem_object_wait(struct drm_i915_gem_object *obj, int i915_gem_object_wait_priority(struct drm_i915_gem_object *obj, unsigned int flags, const struct i915_sched_attr *attr); -#define I915_PRIORITY_DISPLAY I915_USER_PRIORITY(I915_PRIORITY_MAX) #endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c index 6415f9a17e2d..70809d8897cd 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c @@ -8,6 +8,7 @@ #include "gt/intel_engine_pm.h" #include "gt/intel_engine_pool.h" #include "gt/intel_gt.h" +#include "gt/intel_ring.h" #include "i915_gem_clflush.h" #include "i915_gem_object_blt.h" @@ -16,7 +17,7 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, u32 value) { struct drm_i915_private *i915 = ce->vm->i915; - const u32 block_size = S16_MAX * PAGE_SIZE; + const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */ struct intel_engine_pool_node *pool; struct i915_vma *batch; u64 offset; @@ -29,10 +30,10 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce, GEM_BUG_ON(intel_engine_is_virtual(ce->engine)); intel_engine_pm_get(ce->engine); - count = div_u64(vma->size, block_size); + count = div_u64(round_up(vma->size, block_size), block_size); size = (1 + 8 * count) * sizeof(u32); size = round_up(size, PAGE_SIZE); - pool = intel_engine_pool_get(&ce->engine->pool, size); + pool = intel_engine_get_pool(ce->engine, size); if (IS_ERR(pool)) { err = PTR_ERR(pool); goto out_pm; @@ -200,7 +201,7 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, struct i915_vma *dst) { struct drm_i915_private *i915 = ce->vm->i915; - const u32 block_size = S16_MAX * PAGE_SIZE; + const u32 block_size = SZ_8M; /* ~1ms at 8GiB/s preemption delay */ struct intel_engine_pool_node *pool; struct i915_vma *batch; u64 src_offset, dst_offset; @@ -213,10 +214,10 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce, GEM_BUG_ON(intel_engine_is_virtual(ce->engine)); intel_engine_pm_get(ce->engine); - count = div_u64(dst->size, block_size); + count = div_u64(round_up(dst->size, block_size), block_size); size = (1 + 11 * count) * sizeof(u32); size = round_up(size, PAGE_SIZE); - pool = intel_engine_pool_get(&ce->engine->pool, size); + pool = intel_engine_get_pool(ce->engine, size); if (IS_ERR(pool)) { err = PTR_ERR(pool); goto out_pm; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h index 646859fea224..96008374a412 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h @@ -8,6 +8,7 @@ #define __I915_GEM_OBJECT_TYPES_H__ #include <drm/drm_gem.h> +#include <uapi/drm/i915_drm.h> #include "i915_active.h" #include "i915_selftest.h" @@ -30,10 +31,11 @@ struct i915_lut_handle { struct drm_i915_gem_object_ops { unsigned int flags; #define I915_GEM_OBJECT_HAS_STRUCT_PAGE 
BIT(0) -#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(1) -#define I915_GEM_OBJECT_IS_PROXY BIT(2) -#define I915_GEM_OBJECT_NO_GGTT BIT(3) -#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(4) +#define I915_GEM_OBJECT_HAS_IOMEM BIT(1) +#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2) +#define I915_GEM_OBJECT_IS_PROXY BIT(3) +#define I915_GEM_OBJECT_NO_GGTT BIT(4) +#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5) /* Interface between the GEM object and its backing storage. * get_pages() is called once prior to the use of the associated set @@ -118,6 +120,11 @@ struct drm_i915_gem_object { I915_SELFTEST_DECLARE(struct list_head st_link); + unsigned long flags; +#define I915_BO_ALLOC_CONTIGUOUS BIT(0) +#define I915_BO_ALLOC_VOLATILE BIT(1) +#define I915_BO_ALLOC_FLAGS (I915_BO_ALLOC_CONTIGUOUS | I915_BO_ALLOC_VOLATILE) + /* * Is the object to be mapped as read-only to the GPU * Only honoured if hardware has relevant pte bit @@ -153,17 +160,30 @@ struct drm_i915_gem_object { /** Count of VMA actually bound by this object */ atomic_t bind_count; - /** Count of how many global VMA are currently pinned for use by HW */ - unsigned int pin_global; struct { struct mutex lock; /* protects the pages and their use */ atomic_t pages_pin_count; + atomic_t shrink_pin; + + /** + * Memory region for this object. + */ + struct intel_memory_region *region; + /** + * List of memory region blocks allocated for this object. + */ + struct list_head blocks; + /** + * Element within memory_region->objects or region->purgeable + * if the object is marked as DONTNEED. Access is protected by + * region->obj_lock. + */ + struct list_head region_link; struct sg_table *pages; void *mapping; - /* TODO: whack some of this into the error state */ struct i915_page_sizes { /** * The sg mask of the pages sg_table. i.e the mask of diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c index 18f0ce0135c1..29f4c2850745 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c @@ -7,6 +7,7 @@ #include "i915_drv.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" +#include "i915_gem_lmem.h" void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, struct sg_table *pages, @@ -18,6 +19,9 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, lockdep_assert_held(&obj->mm.lock); + if (i915_gem_object_is_volatile(obj)) + obj->mm.madv = I915_MADV_DONTNEED; + /* Make the pages coherent with the GPU (flushing any swapin). 
*/ if (obj->cache_dirty) { obj->write_domain = 0; @@ -71,6 +75,7 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj, list = &i915->mm.shrink_list; list_add_tail(&obj->mm.link, list); + atomic_set(&obj->mm.shrink_pin, 0); spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } } @@ -150,6 +155,16 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj) rcu_read_unlock(); } +static void unmap_object(struct drm_i915_gem_object *obj, void *ptr) +{ + if (i915_gem_object_is_lmem(obj)) + io_mapping_unmap((void __force __iomem *)ptr); + else if (is_vmalloc_addr(ptr)) + vunmap(ptr); + else + kunmap(kmap_to_page(ptr)); +} + struct sg_table * __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) { @@ -159,17 +174,13 @@ __i915_gem_object_unset_pages(struct drm_i915_gem_object *obj) if (IS_ERR_OR_NULL(pages)) return pages; + if (i915_gem_object_is_volatile(obj)) + obj->mm.madv = I915_MADV_WILLNEED; + i915_gem_object_make_unshrinkable(obj); if (obj->mm.mapping) { - void *ptr; - - ptr = page_mask_bits(obj->mm.mapping); - if (is_vmalloc_addr(ptr)) - vunmap(ptr); - else - kunmap(kmap_to_page(ptr)); - + unmap_object(obj, page_mask_bits(obj->mm.mapping)); obj->mm.mapping = NULL; } @@ -224,7 +235,7 @@ unlock: } /* The 'mapping' part of i915_gem_object_pin_map() below */ -static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, +static void *i915_gem_object_map(struct drm_i915_gem_object *obj, enum i915_map_type type) { unsigned long n_pages = obj->base.size >> PAGE_SHIFT; @@ -237,6 +248,16 @@ static void *i915_gem_object_map(const struct drm_i915_gem_object *obj, pgprot_t pgprot; void *addr; + if (i915_gem_object_is_lmem(obj)) { + void __iomem *io; + + if (type != I915_MAP_WC) + return NULL; + + io = i915_gem_object_lmem_io_map(obj, 0, obj->base.size); + return (void __force *)io; + } + /* A single page can always be kmapped */ if (n_pages == 1 && type == I915_MAP_WB) return kmap(sg_page(sgt->sgl)); @@ -278,11 +299,13 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, enum i915_map_type type) { enum i915_map_type has_type; + unsigned int flags; bool pinned; void *ptr; int err; - if (unlikely(!i915_gem_object_has_struct_page(obj))) + flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE | I915_GEM_OBJECT_HAS_IOMEM; + if (!i915_gem_object_type_has(obj, flags)) return ERR_PTR(-ENXIO); err = mutex_lock_interruptible(&obj->mm.lock); @@ -314,10 +337,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj, goto err_unpin; } - if (is_vmalloc_addr(ptr)) - vunmap(ptr); - else - kunmap(kmap_to_page(ptr)); + unmap_object(obj, ptr); ptr = obj->mm.mapping = NULL; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c index 768356908160..8043ff63d73f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c @@ -16,6 +16,7 @@ #include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_gem_object.h" +#include "i915_gem_region.h" #include "i915_scatterlist.h" static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) @@ -191,8 +192,10 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align) /* Perma-pin (until release) the physical set of pages */ __i915_gem_object_pin_pages(obj); - if (!IS_ERR_OR_NULL(pages)) + if (!IS_ERR_OR_NULL(pages)) { i915_gem_shmem_ops.put_pages(obj, pages); + i915_gem_object_release_memory_region(obj); + } mutex_unlock(&obj->mm.lock); return 0; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c 
b/drivers/gpu/drm/i915/gem/i915_gem_pm.c index ad2a63dbcac2..f88ee1317bb4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c @@ -7,138 +7,9 @@ #include "gem/i915_gem_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_gt_requests.h" #include "i915_drv.h" -#include "i915_globals.h" - -static void call_idle_barriers(struct intel_engine_cs *engine) -{ - struct llist_node *node, *next; - - llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) { - struct i915_active_request *active = - container_of((struct list_head *)node, - typeof(*active), link); - - INIT_LIST_HEAD(&active->link); - RCU_INIT_POINTER(active->request, NULL); - - active->retire(active, NULL); - } -} - -static void i915_gem_park(struct drm_i915_private *i915) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - lockdep_assert_held(&i915->drm.struct_mutex); - - for_each_engine(engine, i915, id) - call_idle_barriers(engine); /* cleanup after wedging */ - - i915_vma_parked(i915); - - i915_globals_park(); -} - -static void idle_work_handler(struct work_struct *work) -{ - struct drm_i915_private *i915 = - container_of(work, typeof(*i915), gem.idle_work); - bool park; - - cancel_delayed_work_sync(&i915->gem.retire_work); - mutex_lock(&i915->drm.struct_mutex); - - intel_wakeref_lock(&i915->gt.wakeref); - park = (!intel_wakeref_is_active(&i915->gt.wakeref) && - !work_pending(work)); - intel_wakeref_unlock(&i915->gt.wakeref); - if (park) - i915_gem_park(i915); - else - queue_delayed_work(i915->wq, - &i915->gem.retire_work, - round_jiffies_up_relative(HZ)); - - mutex_unlock(&i915->drm.struct_mutex); -} - -static void retire_work_handler(struct work_struct *work) -{ - struct drm_i915_private *i915 = - container_of(work, typeof(*i915), gem.retire_work.work); - - /* Come back later if the device is busy... */ - if (mutex_trylock(&i915->drm.struct_mutex)) { - i915_retire_requests(i915); - mutex_unlock(&i915->drm.struct_mutex); - } - - queue_delayed_work(i915->wq, - &i915->gem.retire_work, - round_jiffies_up_relative(HZ)); -} - -static int pm_notifier(struct notifier_block *nb, - unsigned long action, - void *data) -{ - struct drm_i915_private *i915 = - container_of(nb, typeof(*i915), gem.pm_notifier); - - switch (action) { - case INTEL_GT_UNPARK: - i915_globals_unpark(); - queue_delayed_work(i915->wq, - &i915->gem.retire_work, - round_jiffies_up_relative(HZ)); - break; - - case INTEL_GT_PARK: - queue_work(i915->wq, &i915->gem.idle_work); - break; - } - - return NOTIFY_OK; -} - -static bool switch_to_kernel_context_sync(struct intel_gt *gt) -{ - bool result = !intel_gt_is_wedged(gt); - - do { - if (i915_gem_wait_for_idle(gt->i915, - I915_WAIT_LOCKED | - I915_WAIT_FOR_IDLE_BOOST, - I915_GEM_IDLE_TIMEOUT) == -ETIME) { - /* XXX hide warning from gem_eio */ - if (i915_modparams.reset) { - dev_err(gt->i915->drm.dev, - "Failed to idle engines, declaring wedged!\n"); - GEM_TRACE_DUMP(); - } - - /* - * Forcibly cancel outstanding work and leave - * the gpu quiet. 
- */ - intel_gt_set_wedged(gt); - result = false; - } - } while (i915_retire_requests(gt->i915) && result); - - if (intel_gt_pm_wait_for_idle(gt)) - result = false; - - return result; -} - -bool i915_gem_load_power_context(struct drm_i915_private *i915) -{ - return switch_to_kernel_context_sync(&i915->gt); -} void i915_gem_suspend(struct drm_i915_private *i915) { @@ -147,8 +18,6 @@ void i915_gem_suspend(struct drm_i915_private *i915) intel_wakeref_auto(&i915->ggtt.userfault_wakeref, 0); flush_workqueue(i915->wq); - mutex_lock(&i915->drm.struct_mutex); - /* * We have to flush all the executing contexts to main memory so * that they can saved in the hibernation image. To ensure the last @@ -158,15 +27,9 @@ void i915_gem_suspend(struct drm_i915_private *i915) * state. Fortunately, the kernel_context is disposable and we do * not rely on its state. */ - switch_to_kernel_context_sync(&i915->gt); - - mutex_unlock(&i915->drm.struct_mutex); - - cancel_delayed_work_sync(&i915->gt.hangcheck.work); + intel_gt_suspend_prepare(&i915->gt); i915_gem_drain_freed_objects(i915); - - intel_uc_suspend(&i915->gt.uc); } static struct drm_i915_gem_object *first_mm_object(struct list_head *list) @@ -206,6 +69,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) * machine in an unusable condition. */ + intel_gt_suspend_late(&i915->gt); + spin_lock_irqsave(&i915->mm.obj_lock, flags); for (phase = phases; *phase; phase++) { LIST_HEAD(keep); @@ -230,18 +95,15 @@ void i915_gem_suspend_late(struct drm_i915_private *i915) list_splice_tail(&keep, *phase); } spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - - i915_gem_sanitize(i915); } void i915_gem_resume(struct drm_i915_private *i915) { GEM_TRACE("\n"); - mutex_lock(&i915->drm.struct_mutex); intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); - if (i915_gem_init_hw(i915)) + if (intel_gt_init_hw(&i915->gt)) goto err_wedged; /* @@ -252,15 +114,8 @@ void i915_gem_resume(struct drm_i915_private *i915) if (intel_gt_resume(&i915->gt)) goto err_wedged; - intel_uc_resume(&i915->gt.uc); - - /* Always reload a context for powersaving. 
*/ - if (!i915_gem_load_power_context(i915)) - goto err_wedged; - out_unlock: intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); - mutex_unlock(&i915->drm.struct_mutex); return; err_wedged: @@ -271,13 +126,3 @@ err_wedged: } goto out_unlock; } - -void i915_gem_init__pm(struct drm_i915_private *i915) -{ - INIT_WORK(&i915->gem.idle_work, idle_work_handler); - INIT_DELAYED_WORK(&i915->gem.retire_work, retire_work_handler); - - i915->gem.pm_notifier.notifier_call = pm_notifier; - blocking_notifier_chain_register(&i915->gt.pm_notifications, - &i915->gem.pm_notifier); -} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.h b/drivers/gpu/drm/i915/gem/i915_gem_pm.h index 6f7d5d11ac3b..26b78dbdc225 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_pm.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.h @@ -12,9 +12,6 @@ struct drm_i915_private; struct work_struct; -void i915_gem_init__pm(struct drm_i915_private *i915); - -bool i915_gem_load_power_context(struct drm_i915_private *i915); void i915_gem_resume(struct drm_i915_private *i915); void i915_gem_idle_work_handler(struct work_struct *work); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.c b/drivers/gpu/drm/i915/gem/i915_gem_region.c new file mode 100644 index 000000000000..2f7bcfb9c964 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.c @@ -0,0 +1,174 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "intel_memory_region.h" +#include "i915_gem_region.h" +#include "i915_drv.h" +#include "i915_trace.h" + +void +i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj, + struct sg_table *pages) +{ + __intel_memory_region_put_pages_buddy(obj->mm.region, &obj->mm.blocks); + + obj->mm.dirty = false; + sg_free_table(pages); + kfree(pages); +} + +int +i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj) +{ + struct intel_memory_region *mem = obj->mm.region; + struct list_head *blocks = &obj->mm.blocks; + resource_size_t size = obj->base.size; + resource_size_t prev_end; + struct i915_buddy_block *block; + unsigned int flags; + struct sg_table *st; + struct scatterlist *sg; + unsigned int sg_page_sizes; + int ret; + + st = kmalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + if (sg_alloc_table(st, size >> ilog2(mem->mm.chunk_size), GFP_KERNEL)) { + kfree(st); + return -ENOMEM; + } + + flags = I915_ALLOC_MIN_PAGE_SIZE; + if (obj->flags & I915_BO_ALLOC_CONTIGUOUS) + flags |= I915_ALLOC_CONTIGUOUS; + + ret = __intel_memory_region_get_pages_buddy(mem, size, flags, blocks); + if (ret) + goto err_free_sg; + + GEM_BUG_ON(list_empty(blocks)); + + sg = st->sgl; + st->nents = 0; + sg_page_sizes = 0; + prev_end = (resource_size_t)-1; + + list_for_each_entry(block, blocks, link) { + u64 block_size, offset; + + block_size = min_t(u64, size, + i915_buddy_block_size(&mem->mm, block)); + offset = i915_buddy_block_offset(block); + + GEM_BUG_ON(overflows_type(block_size, sg->length)); + + if (offset != prev_end || + add_overflows_t(typeof(sg->length), sg->length, block_size)) { + if (st->nents) { + sg_page_sizes |= sg->length; + sg = __sg_next(sg); + } + + sg_dma_address(sg) = mem->region.start + offset; + sg_dma_len(sg) = block_size; + + sg->length = block_size; + + st->nents++; + } else { + sg->length += block_size; + sg_dma_len(sg) += block_size; + } + + prev_end = offset + block_size; + }; + + sg_page_sizes |= sg->length; + sg_mark_end(sg); + i915_sg_trim(st); + + __i915_gem_object_set_pages(obj, st, sg_page_sizes); + + return 0; + +err_free_sg: + sg_free_table(st); + 
kfree(st); + return ret; +} + +void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, + struct intel_memory_region *mem, + unsigned long flags) +{ + INIT_LIST_HEAD(&obj->mm.blocks); + obj->mm.region = intel_memory_region_get(mem); + obj->flags |= flags; + + mutex_lock(&mem->objects.lock); + + if (obj->flags & I915_BO_ALLOC_VOLATILE) + list_add(&obj->mm.region_link, &mem->objects.purgeable); + else + list_add(&obj->mm.region_link, &mem->objects.list); + + mutex_unlock(&mem->objects.lock); +} + +void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj) +{ + struct intel_memory_region *mem = obj->mm.region; + + mutex_lock(&mem->objects.lock); + list_del(&obj->mm.region_link); + mutex_unlock(&mem->objects.lock); + + intel_memory_region_put(mem); +} + +struct drm_i915_gem_object * +i915_gem_object_create_region(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags) +{ + struct drm_i915_gem_object *obj; + + /* + * NB: Our use of resource_size_t for the size stems from using struct + * resource for the mem->region. We might need to revisit this in the + * future. + */ + + GEM_BUG_ON(flags & ~I915_BO_ALLOC_FLAGS); + + if (!mem) + return ERR_PTR(-ENODEV); + + size = round_up(size, mem->min_page_size); + + GEM_BUG_ON(!size); + GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_MIN_ALIGNMENT)); + + /* + * XXX: There is a prevalence of the assumption that we fit the + * object's page count inside a 32bit _signed_ variable. Let's document + * this and catch if we ever need to fix it. In the meantime, if you do + * spot such a local variable, please consider fixing! + */ + + if (size >> PAGE_SHIFT > INT_MAX) + return ERR_PTR(-E2BIG); + + if (overflows_type(size, obj->base.size)) + return ERR_PTR(-E2BIG); + + obj = mem->ops->create_object(mem, size, flags); + if (!IS_ERR(obj)) + trace_i915_gem_object_create(obj); + + return obj; +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_region.h b/drivers/gpu/drm/i915/gem/i915_gem_region.h new file mode 100644 index 000000000000..f2ff6f8bff74 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_region.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_GEM_REGION_H__ +#define __I915_GEM_REGION_H__ + +#include <linux/types.h> + +struct intel_memory_region; +struct drm_i915_gem_object; +struct sg_table; + +int i915_gem_object_get_pages_buddy(struct drm_i915_gem_object *obj); +void i915_gem_object_put_pages_buddy(struct drm_i915_gem_object *obj, + struct sg_table *pages); + +void i915_gem_object_init_memory_region(struct drm_i915_gem_object *obj, + struct intel_memory_region *mem, + unsigned long flags); +void i915_gem_object_release_memory_region(struct drm_i915_gem_object *obj); + +struct drm_i915_gem_object * +i915_gem_object_create_region(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags); + +#endif diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index 4c4954e8ce0a..4d69c3fc3439 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -7,7 +7,9 @@ #include <linux/pagevec.h> #include <linux/swap.h> +#include "gem/i915_gem_region.h" #include "i915_drv.h" +#include "i915_gemfs.h" #include "i915_gem_object.h" #include "i915_scatterlist.h" #include "i915_trace.h" @@ -26,6 +28,7 @@ static void check_release_pagevec(struct pagevec *pvec) static int shmem_get_pages(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 
= to_i915(obj->base.dev); + struct intel_memory_region *mem = obj->mm.region; const unsigned long page_count = obj->base.size / PAGE_SIZE; unsigned long i; struct address_space *mapping; @@ -52,7 +55,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj) * If there's no chance of allocating enough pages for the whole * object, bail early. */ - if (page_count > totalram_pages()) + if (obj->base.size > resource_size(&mem->region)) return -ENOMEM; st = kmalloc(sizeof(*st), GFP_KERNEL); @@ -417,6 +420,8 @@ shmem_pwrite(struct drm_i915_gem_object *obj, static void shmem_release(struct drm_i915_gem_object *obj) { + i915_gem_object_release_memory_region(obj); + fput(obj->base.filp); } @@ -434,9 +439,9 @@ const struct drm_i915_gem_object_ops i915_gem_shmem_ops = { .release = shmem_release, }; -static int create_shmem(struct drm_i915_private *i915, - struct drm_gem_object *obj, - size_t size) +static int __create_shmem(struct drm_i915_private *i915, + struct drm_gem_object *obj, + resource_size_t size) { unsigned long flags = VM_NORESERVE; struct file *filp; @@ -455,31 +460,24 @@ static int create_shmem(struct drm_i915_private *i915, return 0; } -struct drm_i915_gem_object * -i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size) +static struct drm_i915_gem_object * +create_shmem(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags) { + static struct lock_class_key lock_class; + struct drm_i915_private *i915 = mem->i915; struct drm_i915_gem_object *obj; struct address_space *mapping; unsigned int cache_level; gfp_t mask; int ret; - /* There is a prevalence of the assumption that we fit the object's - * page count inside a 32bit _signed_ variable. Let's document this and - * catch if we ever need to fix it. In the meantime, if you do spot - * such a local variable, please consider fixing! 
- */ - if (size >> PAGE_SHIFT > INT_MAX) - return ERR_PTR(-E2BIG); - - if (overflows_type(size, obj->base.size)) - return ERR_PTR(-E2BIG); - obj = i915_gem_object_alloc(); if (!obj) return ERR_PTR(-ENOMEM); - ret = create_shmem(i915, &obj->base, size); + ret = __create_shmem(i915, &obj->base, size); if (ret) goto fail; @@ -494,7 +492,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size) mapping_set_gfp_mask(mapping, mask); GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); - i915_gem_object_init(obj, &i915_gem_shmem_ops); + i915_gem_object_init(obj, &i915_gem_shmem_ops, &lock_class); obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; @@ -518,7 +516,7 @@ i915_gem_object_create_shmem(struct drm_i915_private *i915, u64 size) i915_gem_object_set_cache_coherency(obj, cache_level); - trace_i915_gem_object_create(obj); + i915_gem_object_init_memory_region(obj, mem, 0); return obj; @@ -527,14 +525,22 @@ fail: return ERR_PTR(ret); } +struct drm_i915_gem_object * +i915_gem_object_create_shmem(struct drm_i915_private *i915, + resource_size_t size) +{ + return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM], + size, 0); +} + /* Allocate a new GEM object and fill it with the supplied data */ struct drm_i915_gem_object * i915_gem_object_create_shmem_from_data(struct drm_i915_private *dev_priv, - const void *data, size_t size) + const void *data, resource_size_t size) { struct drm_i915_gem_object *obj; struct file *file; - size_t offset; + resource_size_t offset; int err; obj = i915_gem_object_create_shmem(dev_priv, round_up(size, PAGE_SIZE)); @@ -577,3 +583,35 @@ fail: i915_gem_object_put(obj); return ERR_PTR(err); } + +static int init_shmem(struct intel_memory_region *mem) +{ + int err; + + err = i915_gemfs_init(mem->i915); + if (err) { + DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", + err); + } + + return 0; /* Don't error, we can simply fallback to the kernel mnt */ +} + +static void release_shmem(struct intel_memory_region *mem) +{ + i915_gemfs_fini(mem->i915); +} + +static const struct intel_memory_region_ops shmem_region_ops = { + .init = init_shmem, + .release = release_shmem, + .create_object = create_shmem, +}; + +struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915) +{ + return intel_memory_region_create(i915, 0, + totalram_pages() << PAGE_SHIFT, + PAGE_SIZE, 0, + &shmem_region_ops); +} diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index 1a51b3598d63..f2418a1cfe68 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -16,40 +16,6 @@ #include "i915_trace.h" -static bool shrinker_lock(struct drm_i915_private *i915, - unsigned int flags, - bool *unlock) -{ - struct mutex *m = &i915->drm.struct_mutex; - - switch (mutex_trylock_recursive(m)) { - case MUTEX_TRYLOCK_RECURSIVE: - *unlock = false; - return true; - - case MUTEX_TRYLOCK_FAILED: - *unlock = false; - if (flags & I915_SHRINK_ACTIVE && - mutex_lock_killable_nested(m, I915_MM_SHRINKER) == 0) - *unlock = true; - return *unlock; - - case MUTEX_TRYLOCK_SUCCESS: - *unlock = true; - return true; - } - - BUG(); -} - -static void shrinker_unlock(struct drm_i915_private *i915, bool unlock) -{ - if (!unlock) - return; - - mutex_unlock(&i915->drm.struct_mutex); -} - static bool swap_available(void) { return get_nr_swap_pages() > 0; @@ -61,7 +27,8 @@ static bool can_release_pages(struct 
drm_i915_gem_object *obj) if (!i915_gem_object_is_shrinkable(obj)) return false; - /* Only report true if by unbinding the object and putting its pages + /* + * Only report true if by unbinding the object and putting its pages * we can actually make forward progress towards freeing physical * pages. * @@ -72,16 +39,8 @@ static bool can_release_pages(struct drm_i915_gem_object *obj) if (atomic_read(&obj->mm.pages_pin_count) > atomic_read(&obj->bind_count)) return false; - /* If any vma are "permanently" pinned, it will prevent us from - * reclaiming the obj->mm.pages. We only allow scanout objects to claim - * a permanent pin, along with a few others like the context objects. - * To simplify the scan, and to avoid walking the list of vma under the - * object, we just check the count of its permanently pinned. - */ - if (READ_ONCE(obj->pin_global)) - return false; - - /* We can only return physical pages to the system if we can either + /* + * We can only return physical pages to the system if we can either * discard the contents (because the user has marked them as being * purgeable) or if we can move their contents out to swap. */ @@ -162,10 +121,6 @@ i915_gem_shrink(struct drm_i915_private *i915, intel_wakeref_t wakeref = 0; unsigned long count = 0; unsigned long scanned = 0; - bool unlock; - - if (!shrinker_lock(i915, shrink, &unlock)) - return 0; /* * When shrinking the active list, we should also consider active @@ -275,8 +230,6 @@ i915_gem_shrink(struct drm_i915_private *i915, if (shrink & I915_SHRINK_BOUND) intel_runtime_pm_put(&i915->runtime_pm, wakeref); - shrinker_unlock(i915, unlock); - if (nr_scanned) *nr_scanned += scanned; return count; @@ -346,19 +299,14 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) struct drm_i915_private *i915 = container_of(shrinker, struct drm_i915_private, mm.shrinker); unsigned long freed; - bool unlock; sc->nr_scanned = 0; - if (!shrinker_lock(i915, 0, &unlock)) - return SHRINK_STOP; - freed = i915_gem_shrink(i915, sc->nr_to_scan, &sc->nr_scanned, I915_SHRINK_BOUND | - I915_SHRINK_UNBOUND | - I915_SHRINK_WRITEBACK); + I915_SHRINK_UNBOUND); if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) { intel_wakeref_t wakeref; @@ -373,8 +321,6 @@ i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) } } - shrinker_unlock(i915, unlock); - return sc->nr_scanned ? 
freed : SHRINK_STOP; } @@ -391,6 +337,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr) freed_pages = 0; with_intel_runtime_pm(&i915->runtime_pm, wakeref) freed_pages += i915_gem_shrink(i915, -1UL, NULL, + I915_SHRINK_ACTIVE | I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_WRITEBACK); @@ -426,10 +373,6 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr struct i915_vma *vma, *next; unsigned long freed_pages = 0; intel_wakeref_t wakeref; - bool unlock; - - if (!shrinker_lock(i915, 0, &unlock)) - return NOTIFY_DONE; with_intel_runtime_pm(&i915->runtime_pm, wakeref) freed_pages += i915_gem_shrink(i915, -1UL, NULL, @@ -446,15 +389,11 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr if (!vma->iomap || i915_vma_is_active(vma)) continue; - mutex_unlock(&i915->ggtt.vm.mutex); - if (i915_vma_unbind(vma) == 0) + if (__i915_vma_unbind(vma) == 0) freed_pages += count; - mutex_lock(&i915->ggtt.vm.mutex); } mutex_unlock(&i915->ggtt.vm.mutex); - shrinker_unlock(i915, unlock); - *(unsigned long *)ptr += freed_pages; return NOTIFY_DONE; } @@ -497,22 +436,9 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, fs_reclaim_acquire(GFP_KERNEL); - /* - * As we invariably rely on the struct_mutex within the shrinker, - * but have a complicated recursion dance, taint all the mutexes used - * within the shrinker with the struct_mutex. For completeness, we - * taint with all subclass of struct_mutex, even though we should - * only need tainting by I915_MM_NORMAL to catch possible ABBA - * deadlocks from using struct_mutex inside @mutex. - */ - mutex_acquire(&i915->drm.struct_mutex.dep_map, - I915_MM_SHRINKER, 0, _RET_IP_); - mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_); mutex_release(&mutex->dep_map, _RET_IP_); - mutex_release(&i915->drm.struct_mutex.dep_map, _RET_IP_); - fs_reclaim_release(GFP_KERNEL); if (unlock) @@ -523,46 +449,52 @@ void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915, void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj) { + struct drm_i915_private *i915 = obj_to_i915(obj); + unsigned long flags; + /* * We can only be called while the pages are pinned or when * the pages are released. If pinned, we should only be called * from a single caller under controlled conditions; and on release * only one caller may release us. Neither the two may cross. 
*/ - if (!list_empty(&obj->mm.link)) { /* pinned by caller */ - struct drm_i915_private *i915 = obj_to_i915(obj); - unsigned long flags; - - spin_lock_irqsave(&i915->mm.obj_lock, flags); - GEM_BUG_ON(list_empty(&obj->mm.link)); + if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0)) + return; + spin_lock_irqsave(&i915->mm.obj_lock, flags); + if (!atomic_fetch_inc(&obj->mm.shrink_pin) && + !list_empty(&obj->mm.link)) { list_del_init(&obj->mm.link); i915->mm.shrink_count--; i915->mm.shrink_memory -= obj->base.size; - - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj, struct list_head *head) { + struct drm_i915_private *i915 = obj_to_i915(obj); + unsigned long flags; + GEM_BUG_ON(!i915_gem_object_has_pages(obj)); - GEM_BUG_ON(!list_empty(&obj->mm.link)); + if (!i915_gem_object_is_shrinkable(obj)) + return; - if (i915_gem_object_is_shrinkable(obj)) { - struct drm_i915_private *i915 = obj_to_i915(obj); - unsigned long flags; + if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1)) + return; - spin_lock_irqsave(&i915->mm.obj_lock, flags); - GEM_BUG_ON(!kref_read(&obj->base.refcount)); + spin_lock_irqsave(&i915->mm.obj_lock, flags); + GEM_BUG_ON(!kref_read(&obj->base.refcount)); + if (atomic_dec_and_test(&obj->mm.shrink_pin)) { + GEM_BUG_ON(!list_empty(&obj->mm.link)); list_add_tail(&obj->mm.link, head); i915->mm.shrink_count++; i915->mm.shrink_memory += obj->base.size; - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } + spin_unlock_irqrestore(&i915->mm.obj_lock, flags); } void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c index aa533b4ab5f5..a2d49c04e6a4 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c @@ -10,6 +10,7 @@ #include <drm/drm_mm.h> #include <drm/i915_drm.h> +#include "gem/i915_gem_region.h" #include "i915_drv.h" #include "i915_gem_stolen.h" @@ -150,7 +151,7 @@ static int i915_adjust_stolen(struct drm_i915_private *dev_priv, return 0; } -void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv) +static void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv) { if (!drm_mm_initialized(&dev_priv->mm.stolen)) return; @@ -355,7 +356,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915, } } -int i915_gem_init_stolen(struct drm_i915_private *dev_priv) +static int i915_gem_init_stolen(struct drm_i915_private *dev_priv) { resource_size_t reserved_base, stolen_top; resource_size_t reserved_total, reserved_size; @@ -425,8 +426,11 @@ int i915_gem_init_stolen(struct drm_i915_private *dev_priv) bdw_get_stolen_reserved(dev_priv, &reserved_base, &reserved_size); break; - case 11: default: + MISSING_CASE(INTEL_GEN(dev_priv)); + /* fall-through */ + case 11: + case 12: icl_get_stolen_reserved(dev_priv, &reserved_base, &reserved_size); break; @@ -536,6 +540,9 @@ i915_gem_object_release_stolen(struct drm_i915_gem_object *obj) i915_gem_stolen_remove_node(dev_priv, stolen); kfree(stolen); + + if (obj->mm.region) + i915_gem_object_release_memory_region(obj); } static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { @@ -545,65 +552,116 @@ static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = { }; static struct drm_i915_gem_object * -_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, - struct drm_mm_node *stolen) 
+__i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, + struct drm_mm_node *stolen, + struct intel_memory_region *mem) { + static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; unsigned int cache_level; + int err = -ENOMEM; obj = i915_gem_object_alloc(); - if (obj == NULL) - return NULL; + if (!obj) + goto err; drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size); - i915_gem_object_init(obj, &i915_gem_object_stolen_ops); + i915_gem_object_init(obj, &i915_gem_object_stolen_ops, &lock_class); obj->stolen = stolen; obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE; i915_gem_object_set_cache_coherency(obj, cache_level); - if (i915_gem_object_pin_pages(obj)) + err = i915_gem_object_pin_pages(obj); + if (err) goto cleanup; + if (mem) + i915_gem_object_init_memory_region(obj, mem, 0); + return obj; cleanup: i915_gem_object_free(obj); - return NULL; +err: + return ERR_PTR(err); } -struct drm_i915_gem_object * -i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, - resource_size_t size) +static struct drm_i915_gem_object * +_i915_gem_object_create_stolen(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags) { + struct drm_i915_private *dev_priv = mem->i915; struct drm_i915_gem_object *obj; struct drm_mm_node *stolen; int ret; if (!drm_mm_initialized(&dev_priv->mm.stolen)) - return NULL; + return ERR_PTR(-ENODEV); if (size == 0) - return NULL; + return ERR_PTR(-EINVAL); stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); if (!stolen) - return NULL; + return ERR_PTR(-ENOMEM); ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096); if (ret) { - kfree(stolen); - return NULL; + obj = ERR_PTR(ret); + goto err_free; } - obj = _i915_gem_object_create_stolen(dev_priv, stolen); - if (obj) - return obj; + obj = __i915_gem_object_create_stolen(dev_priv, stolen, mem); + if (IS_ERR(obj)) + goto err_remove; + return obj; + +err_remove: i915_gem_stolen_remove_node(dev_priv, stolen); +err_free: kfree(stolen); - return NULL; + return obj; +} + +struct drm_i915_gem_object * +i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, + resource_size_t size) +{ + return i915_gem_object_create_region(dev_priv->mm.regions[INTEL_REGION_STOLEN], + size, I915_BO_ALLOC_CONTIGUOUS); +} + +static int init_stolen(struct intel_memory_region *mem) +{ + /* + * Initialise stolen early so that we may reserve preallocated + * objects for the BIOS to KMS transition. 
+ */ + return i915_gem_init_stolen(mem->i915); +} + +static void release_stolen(struct intel_memory_region *mem) +{ + i915_gem_cleanup_stolen(mem->i915); +} + +static const struct intel_memory_region_ops i915_region_stolen_ops = { + .init = init_stolen, + .release = release_stolen, + .create_object = _i915_gem_object_create_stolen, +}; + +struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915) +{ + return intel_memory_region_create(i915, + intel_graphics_stolen_res.start, + resource_size(&intel_graphics_stolen_res), + PAGE_SIZE, 0, + &i915_region_stolen_ops); } struct drm_i915_gem_object * @@ -619,9 +677,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv int ret; if (!drm_mm_initialized(&dev_priv->mm.stolen)) - return NULL; - - lockdep_assert_held(&dev_priv->drm.struct_mutex); + return ERR_PTR(-ENODEV); DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n", &stolen_offset, &gtt_offset, &size); @@ -630,11 +686,11 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv if (WARN_ON(size == 0) || WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) || WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT))) - return NULL; + return ERR_PTR(-EINVAL); stolen = kzalloc(sizeof(*stolen), GFP_KERNEL); if (!stolen) - return NULL; + return ERR_PTR(-ENOMEM); stolen->start = stolen_offset; stolen->size = size; @@ -644,15 +700,15 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv if (ret) { DRM_DEBUG_DRIVER("failed to allocate stolen space\n"); kfree(stolen); - return NULL; + return ERR_PTR(ret); } - obj = _i915_gem_object_create_stolen(dev_priv, stolen); - if (obj == NULL) { + obj = __i915_gem_object_create_stolen(dev_priv, stolen, NULL); + if (IS_ERR(obj)) { DRM_DEBUG_DRIVER("failed to allocate stolen object\n"); i915_gem_stolen_remove_node(dev_priv, stolen); kfree(stolen); - return NULL; + return obj; } /* Some objects just need physical mem from stolen space */ @@ -674,22 +730,26 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv * setting up the GTT space. The actual reservation will occur * later.
*/ + mutex_lock(&ggtt->vm.mutex); ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, size, gtt_offset, obj->cache_level, 0); if (ret) { DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n"); + mutex_unlock(&ggtt->vm.mutex); goto err_pages; } GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); + GEM_BUG_ON(vma->pages); vma->pages = obj->mm.pages; - vma->flags |= I915_VMA_GLOBAL_BIND; + atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE); + + set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma)); __i915_vma_set_map_and_fenceable(vma); - mutex_lock(&ggtt->vm.mutex); - list_move_tail(&vma->vm_link, &ggtt->vm.bound_list); + list_add_tail(&vma->vm_link, &ggtt->vm.bound_list); mutex_unlock(&ggtt->vm.mutex); GEM_BUG_ON(i915_gem_object_is_shrinkable(obj)); @@ -701,5 +761,5 @@ err_pages: i915_gem_object_unpin_pages(obj); err: i915_gem_object_put(obj); - return NULL; + return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h index 2289644d8604..c1040627fbf3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h @@ -21,8 +21,7 @@ int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv, u64 end); void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv, struct drm_mm_node *node); -int i915_gem_init_stolen(struct drm_i915_private *dev_priv); -void i915_gem_cleanup_stolen(struct drm_i915_private *dev_priv); +struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915); struct drm_i915_gem_object * i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, resource_size_t size); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c index 1e372420771b..540ef0551789 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_throttle.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_throttle.c @@ -50,10 +50,8 @@ i915_gem_throttle_ioctl(struct drm_device *dev, void *data, if (time_after_eq(request->emitted_jiffies, recent_enough)) break; - if (target) { + if (target && xchg(&target->file_priv, NULL)) list_del(&target->client_link); - target->file_priv = NULL; - } target = request; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index ca0c2f451742..1fa592d82af5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -181,22 +181,25 @@ static int i915_gem_object_fence_prepare(struct drm_i915_gem_object *obj, int tiling_mode, unsigned int stride) { + struct i915_ggtt *ggtt = &to_i915(obj->base.dev)->ggtt; struct i915_vma *vma; - int ret; + int ret = 0; if (tiling_mode == I915_TILING_NONE) return 0; + mutex_lock(&ggtt->vm.mutex); for_each_ggtt_vma(vma, obj) { if (i915_vma_fence_prepare(vma, tiling_mode, stride)) continue; - ret = i915_vma_unbind(vma); + ret = __i915_vma_unbind(vma); if (ret) - return ret; + break; } + mutex_unlock(&ggtt->vm.mutex); - return 0; + return ret; } int @@ -212,7 +215,6 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, GEM_BUG_ON(!i915_tiling_ok(obj, tiling, stride)); GEM_BUG_ON(!stride ^ (tiling == I915_TILING_NONE)); - lockdep_assert_held(&i915->drm.struct_mutex); if ((tiling | stride) == obj->tiling_and_stride) return 0; @@ -233,16 +235,18 @@ i915_gem_object_set_tiling(struct drm_i915_gem_object *obj, * whilst executing a fenced command for an untiled object. 
*/ - err = i915_gem_object_fence_prepare(obj, tiling, stride); - if (err) - return err; - i915_gem_object_lock(obj); if (i915_gem_object_is_framebuffer(obj)) { i915_gem_object_unlock(obj); return -EBUSY; } + err = i915_gem_object_fence_prepare(obj, tiling, stride); + if (err) { + i915_gem_object_unlock(obj); + return err; + } + /* If the memory has unknown (i.e. varying) swizzling, we pin the * pages to prevent them being swapped out and causing corruption * due to the change in swizzling. @@ -313,10 +317,14 @@ int i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_set_tiling *args = data; struct drm_i915_gem_object *obj; int err; + if (!dev_priv->ggtt.num_fences) + return -EOPNOTSUPP; + obj = i915_gem_object_lookup(file, args->handle); if (!obj) return -ENOENT; @@ -340,9 +348,9 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, args->stride = 0; } else { if (args->tiling_mode == I915_TILING_X) - args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_x; + args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_x; else - args->swizzle_mode = to_i915(dev)->mm.bit_6_swizzle_y; + args->swizzle_mode = to_i915(dev)->ggtt.bit_6_swizzle_y; /* Hide bit 17 swizzling from the user. This prevents old Mesa * from aborting the application on sw fallbacks to bit 17, @@ -364,12 +372,7 @@ i915_gem_set_tiling_ioctl(struct drm_device *dev, void *data, } } - err = mutex_lock_interruptible(&dev->struct_mutex); - if (err) - goto err; - err = i915_gem_object_set_tiling(obj, args->tiling_mode, args->stride); - mutex_unlock(&dev->struct_mutex); /* We have to maintain this existing ABI... */ args->stride = i915_gem_object_get_stride(obj); @@ -402,6 +405,9 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_i915_gem_object *obj; int err = -ENOENT; + if (!dev_priv->ggtt.num_fences) + return -EOPNOTSUPP; + rcu_read_lock(); obj = i915_gem_object_lookup_rcu(file, args->handle); if (obj) { @@ -415,10 +421,10 @@ i915_gem_get_tiling_ioctl(struct drm_device *dev, void *data, switch (args->tiling_mode) { case I915_TILING_X: - args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; + args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_x; break; case I915_TILING_Y: - args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; + args->swizzle_mode = dev_priv->ggtt.bit_6_swizzle_y; break; default: case I915_TILING_NONE: diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index abfbac49b8e8..4c72d74d6576 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -92,7 +92,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn); struct interval_tree_node *it; - struct mutex *unlock = NULL; unsigned long end; int ret = 0; @@ -129,33 +128,13 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, } spin_unlock(&mn->lock); - if (!unlock) { - unlock = &mn->mm->i915->drm.struct_mutex; - - switch (mutex_trylock_recursive(unlock)) { - default: - case MUTEX_TRYLOCK_FAILED: - if (mutex_lock_killable_nested(unlock, I915_MM_SHRINKER)) { - i915_gem_object_put(obj); - return -EINTR; - } - /* fall through */ - case MUTEX_TRYLOCK_SUCCESS: - break; - - case MUTEX_TRYLOCK_RECURSIVE: - unlock = ERR_PTR(-EEXIST); - break; - } - } - ret = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE); if (ret == 0) ret = 
__i915_gem_object_put_pages(obj, I915_MM_SHRINKER); i915_gem_object_put(obj); if (ret) - goto unlock; + return ret; spin_lock(&mn->lock); @@ -168,10 +147,6 @@ userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, } spin_unlock(&mn->lock); -unlock: - if (!IS_ERR_OR_NULL(unlock)) - mutex_unlock(unlock); - return ret; } @@ -770,6 +745,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + static struct lock_class_key lock_class; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_userptr *args = data; struct drm_i915_gem_object *obj; @@ -803,7 +779,8 @@ i915_gem_userptr_ioctl(struct drm_device *dev, * On almost all of the older hw, we cannot tell the GPU that * a page is readonly. */ - vm = dev_priv->kernel_context->vm; + vm = rcu_dereference_protected(dev_priv->kernel_context->vm, + true); /* static vm */ if (!vm || !vm->has_read_only) return -ENODEV; } @@ -813,7 +790,7 @@ i915_gem_userptr_ioctl(struct drm_device *dev, return -ENOMEM; drm_gem_private_object_init(dev, &obj->base, args->user_size); - i915_gem_object_init(obj, &i915_gem_userptr_ops); + i915_gem_object_init(obj, &i915_gem_userptr_ops, &lock_class); obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c index 3c5d17b2b670..892d12db6c49 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_gem_object.c @@ -96,6 +96,7 @@ huge_gem_object(struct drm_i915_private *i915, phys_addr_t phys_size, dma_addr_t dma_size) { + static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; unsigned int cache_level; @@ -111,7 +112,7 @@ huge_gem_object(struct drm_i915_private *i915, return ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, dma_size); - i915_gem_object_init(obj, &huge_ops); + i915_gem_object_init(obj, &huge_ops, &lock_class); obj->read_domains = I915_GEM_DOMAIN_CPU; obj->write_domain = I915_GEM_DOMAIN_CPU; diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 8de83c6d81f5..688c49a24f32 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -8,6 +8,8 @@ #include "i915_selftest.h" +#include "gem/i915_gem_region.h" +#include "gem/i915_gem_lmem.h" #include "gem/i915_gem_pm.h" #include "gt/intel_gt.h" @@ -17,6 +19,7 @@ #include "selftests/mock_drm.h" #include "selftests/mock_gem_device.h" +#include "selftests/mock_region.h" #include "selftests/i915_random.h" static const unsigned int page_sizes[] = { @@ -113,8 +116,6 @@ static int get_huge_pages(struct drm_i915_gem_object *obj) if (i915_gem_gtt_prepare_pages(obj, st)) goto err; - obj->mm.madv = I915_MADV_DONTNEED; - GEM_BUG_ON(sg_page_sizes != obj->mm.page_mask); __i915_gem_object_set_pages(obj, st, sg_page_sizes); @@ -135,7 +136,6 @@ static void put_huge_pages(struct drm_i915_gem_object *obj, huge_pages_free_pages(pages); obj->mm.dirty = false; - obj->mm.madv = I915_MADV_WILLNEED; } static const struct drm_i915_gem_object_ops huge_page_ops = { @@ -150,6 +150,7 @@ huge_pages_object(struct drm_i915_private *i915, u64 size, unsigned int page_mask) { + static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; GEM_BUG_ON(!size); @@ -166,7 +167,9 @@ huge_pages_object(struct drm_i915_private *i915, return 
ERR_PTR(-ENOMEM); drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &huge_page_ops); + i915_gem_object_init(obj, &huge_page_ops, &lock_class); + + i915_gem_object_set_volatile(obj); obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; @@ -227,8 +230,6 @@ static int fake_get_huge_pages(struct drm_i915_gem_object *obj) i915_sg_trim(st); - obj->mm.madv = I915_MADV_DONTNEED; - __i915_gem_object_set_pages(obj, st, sg_page_sizes); return 0; @@ -261,8 +262,6 @@ static int fake_get_huge_pages_single(struct drm_i915_gem_object *obj) sg_dma_len(sg) = obj->base.size; sg_dma_address(sg) = page_size; - obj->mm.madv = I915_MADV_DONTNEED; - __i915_gem_object_set_pages(obj, st, sg->length); return 0; @@ -281,7 +280,6 @@ static void fake_put_huge_pages(struct drm_i915_gem_object *obj, { fake_free_huge_pages(obj, pages); obj->mm.dirty = false; - obj->mm.madv = I915_MADV_WILLNEED; } static const struct drm_i915_gem_object_ops fake_ops = { @@ -299,6 +297,7 @@ static const struct drm_i915_gem_object_ops fake_ops_single = { static struct drm_i915_gem_object * fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) { + static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; GEM_BUG_ON(!size); @@ -317,9 +316,11 @@ fake_huge_pages_object(struct drm_i915_private *i915, u64 size, bool single) drm_gem_private_object_init(&i915->drm, &obj->base, size); if (single) - i915_gem_object_init(obj, &fake_ops_single); + i915_gem_object_init(obj, &fake_ops_single, &lock_class); else - i915_gem_object_init(obj, &fake_ops); + i915_gem_object_init(obj, &fake_ops, &lock_class); + + i915_gem_object_set_volatile(obj); obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; @@ -333,7 +334,12 @@ static int igt_check_page_sizes(struct i915_vma *vma) struct drm_i915_private *i915 = vma->vm->i915; unsigned int supported = INTEL_INFO(i915)->page_sizes; struct drm_i915_gem_object *obj = vma->obj; - int err = 0; + int err; + + /* We have to wait for the async bind to complete before our asserts */ + err = i915_vma_sync(vma); + if (err) + return err; if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) { pr_err("unsupported page_sizes.sg=%u, supported=%u\n", @@ -447,6 +453,88 @@ out_device: return err; } +static int igt_mock_memory_region_huge_pages(void *arg) +{ + const unsigned int flags[] = { 0, I915_BO_ALLOC_CONTIGUOUS }; + struct i915_ppgtt *ppgtt = arg; + struct drm_i915_private *i915 = ppgtt->vm.i915; + unsigned long supported = INTEL_INFO(i915)->page_sizes; + struct intel_memory_region *mem; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int bit; + int err = 0; + + mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0); + if (IS_ERR(mem)) { + pr_err("%s failed to create memory region\n", __func__); + return PTR_ERR(mem); + } + + for_each_set_bit(bit, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) { + unsigned int page_size = BIT(bit); + resource_size_t phys; + int i; + + for (i = 0; i < ARRAY_SIZE(flags); ++i) { + obj = i915_gem_object_create_region(mem, page_size, + flags[i]); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_region; + } + + vma = i915_vma_instance(obj, &ppgtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_put; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto out_close; + + err = igt_check_page_sizes(vma); + if (err) + goto out_unpin; + + phys = i915_gem_object_get_dma_address(obj, 0); + if (!IS_ALIGNED(phys, page_size)) { + 
pr_err("%s addr misaligned(%pa) page_size=%u\n", + __func__, &phys, page_size); + err = -EINVAL; + goto out_unpin; + } + + if (vma->page_sizes.gtt != page_size) { + pr_err("%s page_sizes.gtt=%u, expected=%u\n", + __func__, vma->page_sizes.gtt, + page_size); + err = -EINVAL; + goto out_unpin; + } + + i915_vma_unpin(vma); + i915_vma_close(vma); + + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + i915_gem_object_put(obj); + } + } + + goto out_region; + +out_unpin: + i915_vma_unpin(vma); +out_close: + i915_vma_close(vma); +out_put: + i915_gem_object_put(obj); +out_region: + intel_memory_region_put(mem); + return err; +} + static int igt_mock_ppgtt_misaligned_dma(void *arg) { struct i915_ppgtt *ppgtt = arg; @@ -879,9 +967,8 @@ out_object_put: return err; } -static int gpu_write(struct i915_vma *vma, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, +static int gpu_write(struct intel_context *ce, + struct i915_vma *vma, u32 dw, u32 val) { @@ -893,11 +980,12 @@ static int gpu_write(struct i915_vma *vma, if (err) return err; - return igt_gpu_fill_dw(vma, ctx, engine, dw * sizeof(u32), + return igt_gpu_fill_dw(ce, vma, dw * sizeof(u32), vma->size >> PAGE_SHIFT, val); } -static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) +static int +__cpu_check_shmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) { unsigned int needs_flush; unsigned long n; @@ -929,18 +1017,61 @@ static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) return err; } -static int __igt_write_huge(struct i915_gem_context *ctx, - struct intel_engine_cs *engine, +static int __cpu_check_lmem(struct drm_i915_gem_object *obj, u32 dword, u32 val) +{ + unsigned long n; + int err; + + i915_gem_object_lock(obj); + err = i915_gem_object_set_to_wc_domain(obj, false); + i915_gem_object_unlock(obj); + if (err) + return err; + + err = i915_gem_object_pin_pages(obj); + if (err) + return err; + + for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { + u32 __iomem *base; + u32 read_val; + + base = i915_gem_object_lmem_io_map_page_atomic(obj, n); + + read_val = ioread32(base + dword); + io_mapping_unmap_atomic(base); + if (read_val != val) { + pr_err("n=%lu base[%u]=%u, val=%u\n", + n, dword, read_val, val); + err = -EINVAL; + break; + } + } + + i915_gem_object_unpin_pages(obj); + return err; +} + +static int cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) +{ + if (i915_gem_object_has_struct_page(obj)) + return __cpu_check_shmem(obj, dword, val); + else if (i915_gem_object_is_lmem(obj)) + return __cpu_check_lmem(obj, dword, val); + + return -ENODEV; +} + +static int __igt_write_huge(struct intel_context *ce, struct drm_i915_gem_object *obj, u64 size, u64 offset, u32 dword, u32 val) { - struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; struct i915_vma *vma; int err; - vma = i915_vma_instance(obj, vm, NULL); + vma = i915_vma_instance(obj, ce->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -954,7 +1085,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx, * The ggtt may have some pages reserved so * refrain from erroring out. 
*/ - if (err == -ENOSPC && i915_is_ggtt(vm)) + if (err == -ENOSPC && i915_is_ggtt(ce->vm)) err = 0; goto out_vma_close; @@ -964,7 +1095,7 @@ static int __igt_write_huge(struct i915_gem_context *ctx, if (err) goto out_vma_unpin; - err = gpu_write(vma, ctx, engine, dword, val); + err = gpu_write(ce, vma, dword, val); if (err) { pr_err("gpu-write failed at offset=%llx\n", offset); goto out_vma_unpin; @@ -987,14 +1118,13 @@ out_vma_close: static int igt_write_huge(struct i915_gem_context *ctx, struct drm_i915_gem_object *obj) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; - static struct intel_engine_cs *engines[I915_NUM_ENGINES]; - struct intel_engine_cs *engine; + struct i915_gem_engines *engines; + struct i915_gem_engines_iter it; + struct intel_context *ce; I915_RND_STATE(prng); IGT_TIMEOUT(end_time); unsigned int max_page_size; - unsigned int id; + unsigned int count; u64 max; u64 num; u64 size; @@ -1008,19 +1138,18 @@ static int igt_write_huge(struct i915_gem_context *ctx, if (obj->mm.page_sizes.sg & I915_GTT_PAGE_SIZE_64K) size = round_up(size, I915_GTT_PAGE_SIZE_2M); - max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg); - max = div_u64((vm->total - size), max_page_size); - n = 0; - for_each_engine(engine, i915, id) { - if (!intel_engine_can_store_dword(engine)) { - pr_info("store-dword-imm not supported on engine=%u\n", - id); + count = 0; + max = U64_MAX; + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + count++; + if (!intel_engine_can_store_dword(ce->engine)) continue; - } - engines[n++] = engine; - } + max = min(max, ce->vm->total); + n++; + } + i915_gem_context_unlock_engines(ctx); if (!n) return 0; @@ -1029,23 +1158,30 @@ static int igt_write_huge(struct i915_gem_context *ctx, * randomized order, lets also make feeding to the same engine a few * times in succession a possibility by enlarging the permutation array. */ - order = i915_random_order(n * I915_NUM_ENGINES, &prng); + order = i915_random_order(count * count, &prng); if (!order) return -ENOMEM; + max_page_size = rounddown_pow_of_two(obj->mm.page_sizes.sg); + max = div_u64(max - size, max_page_size); + /* * Try various offsets in an ascending/descending fashion until we * timeout -- we want to avoid issues hidden by effectively always using * offset = 0. 
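
The hunk above converts igt_write_huge() from the driver-wide for_each_engine() walk to the per-context GEM engines list. A minimal sketch of that lock/iterate/unlock pattern, using only helpers that appear in this diff; the loop body is illustrative, not part of the patch:

    struct i915_gem_engines_iter it;
    struct intel_context *ce;
    unsigned int n = 0;

    /* Pin the context's engine list for the duration of the walk. */
    for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
        if (!intel_engine_can_store_dword(ce->engine))
            continue;   /* skip engines that cannot emit a dword store */
        n++;            /* e.g. count usable engines, as the test does */
    }
    i915_gem_context_unlock_engines(ctx);
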
*/ i = 0; + engines = i915_gem_context_lock_engines(ctx); for_each_prime_number_from(num, 0, max) { u64 offset_low = num * max_page_size; u64 offset_high = (max - num) * max_page_size; u32 dword = offset_in_page(num) / 4; + struct intel_context *ce; - engine = engines[order[i] % n]; - i = (i + 1) % (n * I915_NUM_ENGINES); + ce = engines->engines[order[i] % engines->num_engines]; + i = (i + 1) % (count * count); + if (!ce || !intel_engine_can_store_dword(ce->engine)) + continue; /* * In order to utilize 64K pages we need to both pad the vma @@ -1057,22 +1193,23 @@ static int igt_write_huge(struct i915_gem_context *ctx, offset_low = round_down(offset_low, I915_GTT_PAGE_SIZE_2M); - err = __igt_write_huge(ctx, engine, obj, size, offset_low, + err = __igt_write_huge(ce, obj, size, offset_low, dword, num + 1); if (err) break; - err = __igt_write_huge(ctx, engine, obj, size, offset_high, + err = __igt_write_huge(ce, obj, size, offset_high, dword, num + 1); if (err) break; if (igt_timeout(end_time, - "%s timed out on engine=%u, offset_low=%llx offset_high=%llx, max_page_size=%x\n", - __func__, engine->id, offset_low, offset_high, + "%s timed out on %s, offset_low=%llx offset_high=%llx, max_page_size=%x\n", + __func__, ce->engine->name, offset_low, offset_high, max_page_size)) break; } + i915_gem_context_unlock_engines(ctx); kfree(order); @@ -1180,131 +1317,235 @@ out_device: return err; } -static int igt_ppgtt_internal_huge(void *arg) +typedef struct drm_i915_gem_object * +(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags); + +static inline bool igt_can_allocate_thp(struct drm_i915_private *i915) +{ + return i915->mm.gemfs && has_transparent_hugepage(); +} + +static struct drm_i915_gem_object * +igt_create_shmem(struct drm_i915_private *i915, u32 size, u32 flags) +{ + if (!igt_can_allocate_thp(i915)) { + pr_info("%s missing THP support, skipping\n", __func__); + return ERR_PTR(-ENODEV); + } + + return i915_gem_object_create_shmem(i915, size); +} + +static struct drm_i915_gem_object * +igt_create_internal(struct drm_i915_private *i915, u32 size, u32 flags) +{ + return i915_gem_object_create_internal(i915, size); +} + +static struct drm_i915_gem_object * +igt_create_system(struct drm_i915_private *i915, u32 size, u32 flags) +{ + return huge_pages_object(i915, size, size); +} + +static struct drm_i915_gem_object * +igt_create_local(struct drm_i915_private *i915, u32 size, u32 flags) +{ + return i915_gem_object_create_lmem(i915, size, flags); +} + +static u32 igt_random_size(struct rnd_state *prng, + u32 min_page_size, + u32 max_page_size) +{ + u64 mask; + u32 size; + + GEM_BUG_ON(!is_power_of_2(min_page_size)); + GEM_BUG_ON(!is_power_of_2(max_page_size)); + GEM_BUG_ON(min_page_size < PAGE_SIZE); + GEM_BUG_ON(min_page_size > max_page_size); + + mask = ((max_page_size << 1ULL) - 1) & PAGE_MASK; + size = prandom_u32_state(prng) & mask; + if (size < min_page_size) + size |= min_page_size; + + return size; +} + +static int igt_ppgtt_smoke_huge(void *arg) { struct i915_gem_context *ctx = arg; struct drm_i915_private *i915 = ctx->i915; struct drm_i915_gem_object *obj; - static const unsigned int sizes[] = { - SZ_64K, - SZ_128K, - SZ_256K, - SZ_512K, - SZ_1M, - SZ_2M, + I915_RND_STATE(prng); + struct { + igt_create_fn fn; + u32 min; + u32 max; + } backends[] = { + { igt_create_internal, SZ_64K, SZ_2M, }, + { igt_create_shmem, SZ_64K, SZ_32M, }, + { igt_create_local, SZ_64K, SZ_1G, }, }; - int i; int err; + int i; /* - * Sanity check that the HW uses huge pages correctly through internal - * 
-- ensure that our writes land in the right place. + * Sanity check that the HW uses huge pages correctly through our + * various backends -- ensure that our writes land in the right place. */ - for (i = 0; i < ARRAY_SIZE(sizes); ++i) { - unsigned int size = sizes[i]; + for (i = 0; i < ARRAY_SIZE(backends); ++i) { + u32 min = backends[i].min; + u32 max = backends[i].max; + u32 size = max; +try_again: + size = igt_random_size(&prng, min, rounddown_pow_of_two(size)); - obj = i915_gem_object_create_internal(i915, size); - if (IS_ERR(obj)) - return PTR_ERR(obj); + obj = backends[i].fn(i915, size, 0); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + if (err == -E2BIG) { + size >>= 1; + goto try_again; + } else if (err == -ENODEV) { + err = 0; + continue; + } + + return err; + } err = i915_gem_object_pin_pages(obj); - if (err) + if (err) { + if (err == -ENXIO) { + i915_gem_object_put(obj); + size >>= 1; + goto try_again; + } goto out_put; + } - if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_64K) { - pr_info("internal unable to allocate huge-page(s) with size=%u\n", - size); + if (obj->mm.page_sizes.phys < min) { + pr_info("%s unable to allocate huge-page(s) with size=%u, i=%d\n", + __func__, size, i); + err = -ENOMEM; goto out_unpin; } err = igt_write_huge(ctx, obj); if (err) { - pr_err("internal write-huge failed with size=%u\n", - size); - goto out_unpin; + pr_err("%s write-huge failed with size=%u, i=%d\n", + __func__, size, i); } - +out_unpin: i915_gem_object_unpin_pages(obj); __i915_gem_object_put_pages(obj, I915_MM_NORMAL); +out_put: i915_gem_object_put(obj); - } - return 0; + if (err == -ENOMEM || err == -ENXIO) + err = 0; -out_unpin: - i915_gem_object_unpin_pages(obj); -out_put: - i915_gem_object_put(obj); + if (err) + break; - return err; -} + cond_resched(); + } -static inline bool igt_can_allocate_thp(struct drm_i915_private *i915) -{ - return i915->mm.gemfs && has_transparent_hugepage(); + return err; } -static int igt_ppgtt_gemfs_huge(void *arg) +static int igt_ppgtt_sanity_check(void *arg) { struct i915_gem_context *ctx = arg; struct drm_i915_private *i915 = ctx->i915; - struct drm_i915_gem_object *obj; - static const unsigned int sizes[] = { - SZ_2M, - SZ_4M, - SZ_8M, - SZ_16M, - SZ_32M, + unsigned int supported = INTEL_INFO(i915)->page_sizes; + struct { + igt_create_fn fn; + unsigned int flags; + } backends[] = { + { igt_create_system, 0, }, + { igt_create_local, I915_BO_ALLOC_CONTIGUOUS, }, }; - int i; + struct { + u32 size; + u32 pages; + } combos[] = { + { SZ_64K, SZ_64K }, + { SZ_2M, SZ_2M }, + { SZ_2M, SZ_64K }, + { SZ_2M - SZ_64K, SZ_64K }, + { SZ_2M - SZ_4K, SZ_64K | SZ_4K }, + { SZ_2M + SZ_4K, SZ_64K | SZ_4K }, + { SZ_2M + SZ_4K, SZ_2M | SZ_4K }, + { SZ_2M + SZ_64K, SZ_2M | SZ_64K }, + }; + int i, j; int err; + if (supported == I915_GTT_PAGE_SIZE_4K) + return 0; + /* - * Sanity check that the HW uses huge pages correctly through gemfs -- - * ensure that our writes land in the right place. + * Sanity check that the HW behaves with a limited set of combinations. + * We already have a bunch of randomised testing, which should give us + * a decent amount of variation between runs, however we should keep + * this to limit the chances of introducing a temporary regression, by + * testing the most obvious cases that might make something blow up. 
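
A worked example of the igt_random_size() mask arithmetic added above, assuming 4 KiB pages; the hex values follow directly from the expression in the hunk and are shown here only for illustration:

    /* min_page_size = SZ_64K (0x10000), max_page_size = SZ_2M (0x200000) */
    u64 mask = ((u64)SZ_2M << 1) - 1;   /* 0x3fffff */
    mask &= PAGE_MASK;                  /* 0x3ff000 with 4 KiB pages */

    /*
     * prandom_u32_state(prng) & mask yields a page-aligned value in
     * [0, 0x3ff000]; anything below SZ_64K gets the minimum OR'ed in,
     * so the returned size lies in [SZ_64K, 2 * SZ_2M - PAGE_SIZE].
     */
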
*/ - if (!igt_can_allocate_thp(i915)) { - pr_info("missing THP support, skipping\n"); - return 0; - } + for (i = 0; i < ARRAY_SIZE(backends); ++i) { + for (j = 0; j < ARRAY_SIZE(combos); ++j) { + struct drm_i915_gem_object *obj; + u32 size = combos[j].size; + u32 pages = combos[j].pages; - for (i = 0; i < ARRAY_SIZE(sizes); ++i) { - unsigned int size = sizes[i]; + obj = backends[i].fn(i915, size, backends[i].flags); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + if (err == -ENODEV) { + pr_info("Device lacks local memory, skipping\n"); + err = 0; + break; + } - obj = i915_gem_object_create_shmem(i915, size); - if (IS_ERR(obj)) - return PTR_ERR(obj); + return err; + } - err = i915_gem_object_pin_pages(obj); - if (err) - goto out_put; + err = i915_gem_object_pin_pages(obj); + if (err) { + i915_gem_object_put(obj); + goto out; + } - if (obj->mm.page_sizes.phys < I915_GTT_PAGE_SIZE_2M) { - pr_info("finishing test early, gemfs unable to allocate huge-page(s) with size=%u\n", - size); - goto out_unpin; - } + GEM_BUG_ON(pages > obj->base.size); + pages = pages & supported; - err = igt_write_huge(ctx, obj); - if (err) { - pr_err("gemfs write-huge failed with size=%u\n", - size); - goto out_unpin; + if (pages) + obj->mm.page_sizes.sg = pages; + + err = igt_write_huge(ctx, obj); + + i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + i915_gem_object_put(obj); + + if (err) { + pr_err("%s write-huge failed with size=%u pages=%u i=%d, j=%d\n", + __func__, size, pages, i, j); + goto out; + } } - i915_gem_object_unpin_pages(obj); - __i915_gem_object_put_pages(obj, I915_MM_NORMAL); - i915_gem_object_put(obj); + cond_resched(); } - return 0; - -out_unpin: - i915_gem_object_unpin_pages(obj); -out_put: - i915_gem_object_put(obj); +out: + if (err == -ENOMEM) + err = 0; return err; } @@ -1314,15 +1555,15 @@ static int igt_ppgtt_pin_update(void *arg) struct i915_gem_context *ctx = arg; struct drm_i915_private *dev_priv = ctx->i915; unsigned long supported = INTEL_INFO(dev_priv)->page_sizes; - struct i915_address_space *vm = ctx->vm; struct drm_i915_gem_object *obj; + struct i915_gem_engines_iter it; + struct i915_address_space *vm; + struct intel_context *ce; struct i915_vma *vma; unsigned int flags = PIN_USER | PIN_OFFSET_FIXED; - struct intel_engine_cs *engine; - enum intel_engine_id id; unsigned int n; int first, last; - int err; + int err = 0; /* * Make sure there's no funny business when doing a PIN_UPDATE -- in the @@ -1332,9 +1573,10 @@ static int igt_ppgtt_pin_update(void *arg) * huge-gtt-pages. 
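
The igt_ppgtt_pin_update() hunk here, and the later ones for igt_tmpfs_fallback(), igt_shrink_thp() and the scratch read/write helpers, all switch from dereferencing ctx->vm directly to taking an explicit reference. A minimal sketch of the pairing, using only calls visible in the diff; the body in between is illustrative:

    struct i915_address_space *vm;

    vm = i915_gem_context_get_vm_rcu(ctx);  /* acquires a reference */
    if (!i915_vm_is_4lvl(vm)) {             /* example check, as in igt_ppgtt_pin_update() */
        pr_info("48b PPGTT not supported, skipping\n");
        goto out_vm;
    }
    /* ... use vm ... */
out_vm:
    i915_vm_put(vm);                        /* drop the reference on every exit path */
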
*/ - if (!vm || !i915_vm_is_4lvl(vm)) { + vm = i915_gem_context_get_vm_rcu(ctx); + if (!i915_vm_is_4lvl(vm)) { pr_info("48b PPGTT not supported, skipping\n"); - return 0; + goto out_vm; } first = ilog2(I915_GTT_PAGE_SIZE_64K); @@ -1387,7 +1629,7 @@ static int igt_ppgtt_pin_update(void *arg) goto out_unpin; } - err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE); + err = i915_vma_bind(vma, I915_CACHE_NONE, PIN_UPDATE, NULL); if (err) goto out_unpin; @@ -1419,14 +1661,18 @@ static int igt_ppgtt_pin_update(void *arg) */ n = 0; - for_each_engine(engine, dev_priv, id) { - if (!intel_engine_can_store_dword(engine)) + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + if (!intel_engine_can_store_dword(ce->engine)) continue; - err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf); + err = gpu_write(ce, vma, n++, 0xdeadbeaf); if (err) - goto out_unpin; + break; } + i915_gem_context_unlock_engines(ctx); + if (err) + goto out_unpin; + while (n--) { err = cpu_check(obj, n, 0xdeadbeaf); if (err) @@ -1439,6 +1685,8 @@ out_close: i915_vma_close(vma); out_put: i915_gem_object_put(obj); +out_vm: + i915_vm_put(vm); return err; } @@ -1448,7 +1696,7 @@ static int igt_tmpfs_fallback(void *arg) struct i915_gem_context *ctx = arg; struct drm_i915_private *i915 = ctx->i915; struct vfsmount *gemfs = i915->mm.gemfs; - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; + struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx); struct drm_i915_gem_object *obj; struct i915_vma *vma; u32 *vaddr; @@ -1498,6 +1746,7 @@ out_put: out_restore: i915->mm.gemfs = gemfs; + i915_vm_put(vm); return err; } @@ -1505,14 +1754,14 @@ static int igt_shrink_thp(void *arg) { struct i915_gem_context *ctx = arg; struct drm_i915_private *i915 = ctx->i915; - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; + struct i915_address_space *vm = i915_gem_context_get_vm_rcu(ctx); struct drm_i915_gem_object *obj; - struct intel_engine_cs *engine; - enum intel_engine_id id; + struct i915_gem_engines_iter it; + struct intel_context *ce; struct i915_vma *vma; unsigned int flags = PIN_USER; unsigned int n; - int err; + int err = 0; /* * Sanity check shrinking huge-paged object -- make sure nothing blows @@ -1521,12 +1770,14 @@ static int igt_shrink_thp(void *arg) if (!igt_can_allocate_thp(i915)) { pr_info("missing THP support, skipping\n"); - return 0; + goto out_vm; } obj = i915_gem_object_create_shmem(i915, SZ_2M); - if (IS_ERR(obj)) - return PTR_ERR(obj); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_vm; + } vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { @@ -1548,16 +1799,19 @@ static int igt_shrink_thp(void *arg) goto out_unpin; n = 0; - for_each_engine(engine, i915, id) { - if (!intel_engine_can_store_dword(engine)) + + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + if (!intel_engine_can_store_dword(ce->engine)) continue; - err = gpu_write(vma, ctx, engine, n++, 0xdeadbeaf); + err = gpu_write(ce, vma, n++, 0xdeadbeaf); if (err) - goto out_unpin; + break; } - + i915_gem_context_unlock_engines(ctx); i915_vma_unpin(vma); + if (err) + goto out_close; /* * Now that the pages are *unpinned* shrink-all should invoke @@ -1583,16 +1837,17 @@ static int igt_shrink_thp(void *arg) while (n--) { err = cpu_check(obj, n, 0xdeadbeaf); if (err) - goto out_unpin; + break; } - out_unpin: i915_vma_unpin(vma); out_close: i915_vma_close(vma); out_put: i915_gem_object_put(obj); +out_vm: + i915_vm_put(vm); return err; } @@ -1601,6 +1856,7 @@ int i915_gem_huge_page_mock_selftests(void) { 
static const struct i915_subtest tests[] = { SUBTEST(igt_mock_exhaust_device_supported_pages), + SUBTEST(igt_mock_memory_region_huge_pages), SUBTEST(igt_mock_ppgtt_misaligned_dma), SUBTEST(igt_mock_ppgtt_huge_fill), SUBTEST(igt_mock_ppgtt_64K), @@ -1617,7 +1873,6 @@ int i915_gem_huge_page_mock_selftests(void) mkwrite_device_info(dev_priv)->ppgtt_type = INTEL_PPGTT_FULL; mkwrite_device_info(dev_priv)->ppgtt_size = 48; - mutex_lock(&dev_priv->drm.struct_mutex); ppgtt = i915_ppgtt_create(dev_priv); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); @@ -1643,9 +1898,7 @@ out_close: i915_vm_put(&ppgtt->vm); out_unlock: - mutex_unlock(&dev_priv->drm.struct_mutex); drm_dev_put(&dev_priv->drm); - return err; } @@ -1656,12 +1909,12 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_ppgtt_pin_update), SUBTEST(igt_tmpfs_fallback), SUBTEST(igt_ppgtt_exhaust_huge), - SUBTEST(igt_ppgtt_gemfs_huge), - SUBTEST(igt_ppgtt_internal_huge), + SUBTEST(igt_ppgtt_smoke_huge), + SUBTEST(igt_ppgtt_sanity_check), }; struct drm_file *file; struct i915_gem_context *ctx; - intel_wakeref_t wakeref; + struct i915_address_space *vm; int err; if (!HAS_PPGTT(i915)) { @@ -1676,25 +1929,21 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915) if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - ctx = live_context(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); - goto out_unlock; + goto out_file; } - if (ctx->vm) - ctx->vm->scrub_64K = true; + mutex_lock(&ctx->mutex); + vm = i915_gem_context_vm(ctx); + if (vm) + WRITE_ONCE(vm->scrub_64K, true); + mutex_unlock(&ctx->mutex); err = i915_subtests(tests, ctx); -out_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - +out_file: mock_file_free(i915, file); - return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index d8804a847945..da8edee4fe0a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -5,6 +5,7 @@ #include "i915_selftest.h" +#include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "selftests/igt_flush_test.h" @@ -12,10 +13,9 @@ #include "huge_gem_object.h" #include "mock_context.h" -static int igt_client_fill(void *arg) +static int __igt_client_fill(struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = arg; - struct intel_context *ce = i915->engine[BCS0]->kernel_context; + struct intel_context *ce = engine->kernel_context; struct drm_i915_gem_object *obj; struct rnd_state prng; IGT_TIMEOUT(end); @@ -37,7 +37,7 @@ static int igt_client_fill(void *arg) pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__, phys_sz, sz, val); - obj = huge_gem_object(i915, phys_sz, sz); + obj = huge_gem_object(engine->i915, phys_sz, sz); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto err_flush; @@ -103,6 +103,28 @@ err_flush: return err; } +static int igt_client_fill(void *arg) +{ + int inst = 0; + + do { + struct intel_engine_cs *engine; + int err; + + engine = intel_engine_lookup_user(arg, + I915_ENGINE_CLASS_COPY, + inst++); + if (!engine) + return 0; + + err = __igt_client_fill(engine); + if (err == -ENOMEM) + err = 0; + if (err) + return err; + } while (1); +} + int i915_gem_client_blt_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { diff --git 
a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index 0ff7a89aadca..2b29f6b4e1dd 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -7,13 +7,18 @@ #include <linux/prime_numbers.h> #include "gt/intel_gt.h" +#include "gt/intel_gt_pm.h" +#include "gt/intel_ring.h" #include "i915_selftest.h" #include "selftests/i915_random.h" -static int cpu_set(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 v) +struct context { + struct drm_i915_gem_object *obj; + struct intel_engine_cs *engine; +}; + +static int cpu_set(struct context *ctx, unsigned long offset, u32 v) { unsigned int needs_clflush; struct page *page; @@ -21,11 +26,11 @@ static int cpu_set(struct drm_i915_gem_object *obj, u32 *cpu; int err; - err = i915_gem_object_prepare_write(obj, &needs_clflush); + err = i915_gem_object_prepare_write(ctx->obj, &needs_clflush); if (err) return err; - page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); + page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); cpu = map + offset_in_page(offset); @@ -38,14 +43,12 @@ static int cpu_set(struct drm_i915_gem_object *obj, drm_clflush_virt_range(cpu, sizeof(*cpu)); kunmap_atomic(map); - i915_gem_object_finish_access(obj); + i915_gem_object_finish_access(ctx->obj); return 0; } -static int cpu_get(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 *v) +static int cpu_get(struct context *ctx, unsigned long offset, u32 *v) { unsigned int needs_clflush; struct page *page; @@ -53,11 +56,11 @@ static int cpu_get(struct drm_i915_gem_object *obj, u32 *cpu; int err; - err = i915_gem_object_prepare_read(obj, &needs_clflush); + err = i915_gem_object_prepare_read(ctx->obj, &needs_clflush); if (err) return err; - page = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); + page = i915_gem_object_get_page(ctx->obj, offset >> PAGE_SHIFT); map = kmap_atomic(page); cpu = map + offset_in_page(offset); @@ -67,136 +70,137 @@ static int cpu_get(struct drm_i915_gem_object *obj, *v = *cpu; kunmap_atomic(map); - i915_gem_object_finish_access(obj); + i915_gem_object_finish_access(ctx->obj); return 0; } -static int gtt_set(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 v) +static int gtt_set(struct context *ctx, unsigned long offset, u32 v) { struct i915_vma *vma; u32 __iomem *map; - int err; + int err = 0; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_gtt_domain(obj, true); - i915_gem_object_unlock(obj); + i915_gem_object_lock(ctx->obj); + err = i915_gem_object_set_to_gtt_domain(ctx->obj, true); + i915_gem_object_unlock(ctx->obj); if (err) return err; - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) return PTR_ERR(vma); + intel_gt_pm_get(vma->vm->gt); + map = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); - if (IS_ERR(map)) - return PTR_ERR(map); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto out_rpm; + } iowrite32(v, &map[offset / sizeof(*map)]); i915_vma_unpin_iomap(vma); - return 0; +out_rpm: + intel_gt_pm_put(vma->vm->gt); + return err; } -static int gtt_get(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 *v) +static int gtt_get(struct context *ctx, unsigned long offset, u32 *v) { struct i915_vma *vma; u32 __iomem *map; - int err; + int err = 0; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_gtt_domain(obj, 
false); - i915_gem_object_unlock(obj); + i915_gem_object_lock(ctx->obj); + err = i915_gem_object_set_to_gtt_domain(ctx->obj, false); + i915_gem_object_unlock(ctx->obj); if (err) return err; - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE); + vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, PIN_MAPPABLE); if (IS_ERR(vma)) return PTR_ERR(vma); + intel_gt_pm_get(vma->vm->gt); + map = i915_vma_pin_iomap(vma); i915_vma_unpin(vma); - if (IS_ERR(map)) - return PTR_ERR(map); + if (IS_ERR(map)) { + err = PTR_ERR(map); + goto out_rpm; + } *v = ioread32(&map[offset / sizeof(*map)]); i915_vma_unpin_iomap(vma); - return 0; +out_rpm: + intel_gt_pm_put(vma->vm->gt); + return err; } -static int wc_set(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 v) +static int wc_set(struct context *ctx, unsigned long offset, u32 v) { u32 *map; int err; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_wc_domain(obj, true); - i915_gem_object_unlock(obj); + i915_gem_object_lock(ctx->obj); + err = i915_gem_object_set_to_wc_domain(ctx->obj, true); + i915_gem_object_unlock(ctx->obj); if (err) return err; - map = i915_gem_object_pin_map(obj, I915_MAP_WC); + map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC); if (IS_ERR(map)) return PTR_ERR(map); map[offset / sizeof(*map)] = v; - i915_gem_object_unpin_map(obj); + i915_gem_object_unpin_map(ctx->obj); return 0; } -static int wc_get(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 *v) +static int wc_get(struct context *ctx, unsigned long offset, u32 *v) { u32 *map; int err; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_wc_domain(obj, false); - i915_gem_object_unlock(obj); + i915_gem_object_lock(ctx->obj); + err = i915_gem_object_set_to_wc_domain(ctx->obj, false); + i915_gem_object_unlock(ctx->obj); if (err) return err; - map = i915_gem_object_pin_map(obj, I915_MAP_WC); + map = i915_gem_object_pin_map(ctx->obj, I915_MAP_WC); if (IS_ERR(map)) return PTR_ERR(map); *v = map[offset / sizeof(*map)]; - i915_gem_object_unpin_map(obj); + i915_gem_object_unpin_map(ctx->obj); return 0; } -static int gpu_set(struct drm_i915_gem_object *obj, - unsigned long offset, - u32 v) +static int gpu_set(struct context *ctx, unsigned long offset, u32 v) { - struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_request *rq; struct i915_vma *vma; u32 *cs; int err; - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_gtt_domain(obj, true); - i915_gem_object_unlock(obj); + i915_gem_object_lock(ctx->obj); + err = i915_gem_object_set_to_gtt_domain(ctx->obj, true); + i915_gem_object_unlock(ctx->obj); if (err) return err; - vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0); + vma = i915_gem_object_ggtt_pin(ctx->obj, NULL, 0, 0, 0); if (IS_ERR(vma)) return PTR_ERR(vma); - rq = i915_request_create(i915->engine[RCS0]->kernel_context); + rq = i915_request_create(ctx->engine->kernel_context); if (IS_ERR(rq)) { i915_vma_unpin(vma); return PTR_ERR(rq); @@ -209,12 +213,12 @@ static int gpu_set(struct drm_i915_gem_object *obj, return PTR_ERR(cs); } - if (INTEL_GEN(i915) >= 8) { + if (INTEL_GEN(ctx->engine->i915) >= 8) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | 1 << 22; *cs++ = lower_32_bits(i915_ggtt_offset(vma) + offset); *cs++ = upper_32_bits(i915_ggtt_offset(vma) + offset); *cs++ = v; - } else if (INTEL_GEN(i915) >= 4) { + } else if (INTEL_GEN(ctx->engine->i915) >= 4) { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; *cs++ = 0; *cs++ = i915_ggtt_offset(vma) + offset; @@ -239,32 +243,34 @@ static int gpu_set(struct 
drm_i915_gem_object *obj, return err; } -static bool always_valid(struct drm_i915_private *i915) +static bool always_valid(struct context *ctx) { return true; } -static bool needs_fence_registers(struct drm_i915_private *i915) +static bool needs_fence_registers(struct context *ctx) { - return !intel_gt_is_wedged(&i915->gt); -} + struct intel_gt *gt = ctx->engine->gt; -static bool needs_mi_store_dword(struct drm_i915_private *i915) -{ - if (intel_gt_is_wedged(&i915->gt)) + if (intel_gt_is_wedged(gt)) return false; - if (!HAS_ENGINE(i915, RCS0)) + return gt->ggtt->num_fences; +} + +static bool needs_mi_store_dword(struct context *ctx) +{ + if (intel_gt_is_wedged(ctx->engine->gt)) return false; - return intel_engine_can_store_dword(i915->engine[RCS0]); + return intel_engine_can_store_dword(ctx->engine); } static const struct igt_coherency_mode { const char *name; - int (*set)(struct drm_i915_gem_object *, unsigned long offset, u32 v); - int (*get)(struct drm_i915_gem_object *, unsigned long offset, u32 *v); - bool (*valid)(struct drm_i915_private *i915); + int (*set)(struct context *ctx, unsigned long offset, u32 v); + int (*get)(struct context *ctx, unsigned long offset, u32 *v); + bool (*valid)(struct context *ctx); } igt_coherency_mode[] = { { "cpu", cpu_set, cpu_get, always_valid }, { "gtt", gtt_set, gtt_get, needs_fence_registers }, @@ -273,19 +279,37 @@ static const struct igt_coherency_mode { { }, }; +static struct intel_engine_cs * +random_engine(struct drm_i915_private *i915, struct rnd_state *prng) +{ + struct intel_engine_cs *engine; + unsigned int count; + + count = 0; + for_each_uabi_engine(engine, i915) + count++; + + count = i915_prandom_u32_max_state(count, prng); + for_each_uabi_engine(engine, i915) + if (count-- == 0) + return engine; + + return NULL; +} + static int igt_gem_coherency(void *arg) { const unsigned int ncachelines = PAGE_SIZE/64; - I915_RND_STATE(prng); struct drm_i915_private *i915 = arg; const struct igt_coherency_mode *read, *write, *over; - struct drm_i915_gem_object *obj; - intel_wakeref_t wakeref; unsigned long count, n; u32 *offsets, *values; + I915_RND_STATE(prng); + struct context ctx; int err = 0; - /* We repeatedly write, overwrite and read from a sequence of + /* + * We repeatedly write, overwrite and read from a sequence of * cachelines in order to try and detect incoherency (unflushed writes * from either the CPU or GPU). Each setter/getter uses our cache * domain API which should prevent incoherency. 
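
With the object and engine folded into struct context above, every igt_coherency_mode entry now operates on the same (obj, engine) pair, and the engine is picked once by random_engine(). A condensed sketch of how one write/read combination is exercised, mirroring the loop in the next hunk; obj, offset, value and the write/read mode pointers stand in for the selftest's loop variables:

    struct context ctx = {
        .obj = obj,
        .engine = random_engine(i915, &prng),   /* one engine for the whole run */
    };
    u32 found;
    int err = 0;

    if (write->valid(&ctx) && read->valid(&ctx)) {
        err = write->set(&ctx, offset, value);  /* cpu_set, gtt_set, wc_set or gpu_set */
        if (!err)
            err = read->get(&ctx, offset, &found);
        if (!err && found != value)
            err = -EINVAL;                      /* incoherency between the two paths */
    }
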
@@ -299,34 +323,36 @@ static int igt_gem_coherency(void *arg) values = offsets + ncachelines; - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + ctx.engine = random_engine(i915, &prng); + GEM_BUG_ON(!ctx.engine); + pr_info("%s: using %s\n", __func__, ctx.engine->name); + for (over = igt_coherency_mode; over->name; over++) { if (!over->set) continue; - if (!over->valid(i915)) + if (!over->valid(&ctx)) continue; for (write = igt_coherency_mode; write->name; write++) { if (!write->set) continue; - if (!write->valid(i915)) + if (!write->valid(&ctx)) continue; for (read = igt_coherency_mode; read->name; read++) { if (!read->get) continue; - if (!read->valid(i915)) + if (!read->valid(&ctx)) continue; for_each_prime_number_from(count, 1, ncachelines) { - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto unlock; + ctx.obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(ctx.obj)) { + err = PTR_ERR(ctx.obj); + goto free; } i915_random_reorder(offsets, ncachelines, &prng); @@ -334,7 +360,7 @@ static int igt_gem_coherency(void *arg) values[n] = prandom_u32_state(&prng); for (n = 0; n < count; n++) { - err = over->set(obj, offsets[n], ~values[n]); + err = over->set(&ctx, offsets[n], ~values[n]); if (err) { pr_err("Failed to set stale value[%ld/%ld] in object using %s, err=%d\n", n, count, over->name, err); @@ -343,7 +369,7 @@ static int igt_gem_coherency(void *arg) } for (n = 0; n < count; n++) { - err = write->set(obj, offsets[n], values[n]); + err = write->set(&ctx, offsets[n], values[n]); if (err) { pr_err("Failed to set value[%ld/%ld] in object using %s, err=%d\n", n, count, write->name, err); @@ -354,7 +380,7 @@ static int igt_gem_coherency(void *arg) for (n = 0; n < count; n++) { u32 found; - err = read->get(obj, offsets[n], &found); + err = read->get(&ctx, offsets[n], &found); if (err) { pr_err("Failed to get value[%ld/%ld] in object using %s, err=%d\n", n, count, read->name, err); @@ -372,20 +398,18 @@ static int igt_gem_coherency(void *arg) } } - i915_gem_object_put(obj); + i915_gem_object_put(ctx.obj); } } } } -unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); +free: kfree(offsets); return err; put_object: - i915_gem_object_put(obj); - goto unlock; + i915_gem_object_put(ctx.obj); + goto free; } int i915_gem_coherency_live_selftests(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 3e6f4a65d356..62fabc023a83 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -8,6 +8,7 @@ #include "gem/i915_gem_pm.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_requests.h" #include "gt/intel_reset.h" #include "i915_selftest.h" @@ -31,7 +32,6 @@ static int live_nop_switch(void *arg) struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; struct i915_gem_context **ctx; - enum intel_engine_id id; struct igt_live_test t; struct drm_file *file; unsigned long n; @@ -52,23 +52,21 @@ static int live_nop_switch(void *arg) if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&i915->drm.struct_mutex); - ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL); if (!ctx) { err = -ENOMEM; - goto out_unlock; + goto out_file; } for (n = 0; n < nctx; n++) { ctx[n] = live_context(i915, file); if (IS_ERR(ctx[n])) { err = PTR_ERR(ctx[n]); - goto out_unlock; + goto out_file; } 
} - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { struct i915_request *rq; unsigned long end_time, prime; ktime_t times[2] = {}; @@ -78,7 +76,7 @@ static int live_nop_switch(void *arg) rq = igt_request_alloc(ctx[n], engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto out_unlock; + goto out_file; } i915_request_add(rq); } @@ -86,7 +84,7 @@ static int live_nop_switch(void *arg) pr_err("Failed to populated %d contexts\n", nctx); intel_gt_set_wedged(&i915->gt); err = -EIO; - goto out_unlock; + goto out_file; } times[1] = ktime_get_raw(); @@ -96,7 +94,7 @@ static int live_nop_switch(void *arg) err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) - goto out_unlock; + goto out_file; end_time = jiffies + i915_selftest.timeout_jiffies; for_each_prime_number_from(prime, 2, 8192) { @@ -106,7 +104,7 @@ static int live_nop_switch(void *arg) rq = igt_request_alloc(ctx[n % nctx], engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); - goto out_unlock; + goto out_file; } /* @@ -142,7 +140,7 @@ static int live_nop_switch(void *arg) err = igt_live_test_end(&t); if (err) - goto out_unlock; + goto out_file; pr_info("Switch latencies on %s: 1 = %lluns, %lu = %lluns\n", engine->name, @@ -150,8 +148,235 @@ static int live_nop_switch(void *arg) prime - 1, div64_u64(ktime_to_ns(times[1]), prime - 1)); } -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); +out_file: + mock_file_free(i915, file); + return err; +} + +struct parallel_switch { + struct task_struct *tsk; + struct intel_context *ce[2]; +}; + +static int __live_parallel_switch1(void *data) +{ + struct parallel_switch *arg = data; + IGT_TIMEOUT(end_time); + unsigned long count; + + count = 0; + do { + struct i915_request *rq = NULL; + int err, n; + + err = 0; + for (n = 0; !err && n < ARRAY_SIZE(arg->ce); n++) { + struct i915_request *prev = rq; + + rq = i915_request_create(arg->ce[n]); + if (IS_ERR(rq)) { + i915_request_put(prev); + return PTR_ERR(rq); + } + + i915_request_get(rq); + if (prev) { + err = i915_request_await_dma_fence(rq, &prev->fence); + i915_request_put(prev); + } + + i915_request_add(rq); + } + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(rq); + if (err) + return err; + + count++; + } while (!__igt_timeout(end_time, NULL)); + + pr_info("%s: %lu switches (sync)\n", arg->ce[0]->engine->name, count); + return 0; +} + +static int __live_parallel_switchN(void *data) +{ + struct parallel_switch *arg = data; + struct i915_request *rq = NULL; + IGT_TIMEOUT(end_time); + unsigned long count; + int n; + + count = 0; + do { + for (n = 0; n < ARRAY_SIZE(arg->ce); n++) { + struct i915_request *prev = rq; + int err = 0; + + rq = i915_request_create(arg->ce[n]); + if (IS_ERR(rq)) { + i915_request_put(prev); + return PTR_ERR(rq); + } + + i915_request_get(rq); + if (prev) { + err = i915_request_await_dma_fence(rq, &prev->fence); + i915_request_put(prev); + } + + i915_request_add(rq); + if (err) { + i915_request_put(rq); + return err; + } + } + + count++; + } while (!__igt_timeout(end_time, NULL)); + i915_request_put(rq); + + pr_info("%s: %lu switches (many)\n", arg->ce[0]->engine->name, count); + return 0; +} + +static int live_parallel_switch(void *arg) +{ + struct drm_i915_private *i915 = arg; + static int (* const func[])(void *arg) = { + __live_parallel_switch1, + __live_parallel_switchN, + NULL, + }; + struct parallel_switch *data = NULL; + struct i915_gem_engines *engines; + struct i915_gem_engines_iter it; + int (* const *fn)(void *arg); + struct i915_gem_context *ctx; + struct 
intel_context *ce; + struct drm_file *file; + int n, m, count; + int err = 0; + + /* + * Check we can process switches on all engines simultaneously. + */ + + if (!DRIVER_CAPS(i915)->has_logical_contexts) + return 0; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_file; + } + + engines = i915_gem_context_lock_engines(ctx); + count = engines->num_engines; + + data = kcalloc(count, sizeof(*data), GFP_KERNEL); + if (!data) { + i915_gem_context_unlock_engines(ctx); + err = -ENOMEM; + goto out_file; + } + + m = 0; /* Use the first context as our template for the engines */ + for_each_gem_engine(ce, engines, it) { + err = intel_context_pin(ce); + if (err) { + i915_gem_context_unlock_engines(ctx); + goto out; + } + data[m++].ce[0] = intel_context_get(ce); + } + i915_gem_context_unlock_engines(ctx); + + /* Clone the same set of engines into the other contexts */ + for (n = 1; n < ARRAY_SIZE(data->ce); n++) { + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out; + } + + for (m = 0; m < count; m++) { + if (!data[m].ce[0]) + continue; + + ce = intel_context_create(ctx, data[m].ce[0]->engine); + if (IS_ERR(ce)) + goto out; + + err = intel_context_pin(ce); + if (err) { + intel_context_put(ce); + goto out; + } + + data[m].ce[n] = ce; + } + } + + for (fn = func; !err && *fn; fn++) { + struct igt_live_test t; + int n; + + err = igt_live_test_begin(&t, i915, __func__, ""); + if (err) + break; + + for (n = 0; n < count; n++) { + if (!data[n].ce[0]) + continue; + + data[n].tsk = kthread_run(*fn, &data[n], + "igt/parallel:%s", + data[n].ce[0]->engine->name); + if (IS_ERR(data[n].tsk)) { + err = PTR_ERR(data[n].tsk); + break; + } + get_task_struct(data[n].tsk); + } + + yield(); /* start all threads before we kthread_stop() */ + + for (n = 0; n < count; n++) { + int status; + + if (IS_ERR_OR_NULL(data[n].tsk)) + continue; + + status = kthread_stop(data[n].tsk); + if (status && !err) + err = status; + + put_task_struct(data[n].tsk); + data[n].tsk = NULL; + } + + if (igt_live_test_end(&t)) + err = -EIO; + } + +out: + for (n = 0; n < count; n++) { + for (m = 0; m < ARRAY_SIZE(data->ce); m++) { + if (!data[n].ce[m]) + continue; + + intel_context_unpin(data[n].ce[m]); + intel_context_put(data[n].ce[m]); + } + } + kfree(data); +out_file: mock_file_free(i915, file); return err; } @@ -166,28 +391,20 @@ static unsigned long fake_page_count(struct drm_i915_gem_object *obj) return huge_gem_object_dma_size(obj) >> PAGE_SHIFT; } -static int gpu_fill(struct drm_i915_gem_object *obj, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, +static int gpu_fill(struct intel_context *ce, + struct drm_i915_gem_object *obj, unsigned int dw) { - struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; struct i915_vma *vma; int err; - GEM_BUG_ON(obj->base.size > vm->total); - GEM_BUG_ON(!intel_engine_can_store_dword(engine)); + GEM_BUG_ON(obj->base.size > ce->vm->total); + GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); - vma = i915_vma_instance(obj, vm, NULL); + vma = i915_vma_instance(obj, ce->vm, NULL); if (IS_ERR(vma)) return PTR_ERR(vma); - i915_gem_object_lock(obj); - err = i915_gem_object_set_to_gtt_domain(obj, true); - i915_gem_object_unlock(obj); - if (err) - return err; - err = i915_vma_pin(vma, 0, 0, PIN_HIGH | PIN_USER); if (err) return err; @@ -200,9 +417,7 @@ static int gpu_fill(struct drm_i915_gem_object *obj, * whilst checking that each context 
provides a unique view * into the object. */ - err = igt_gpu_fill_dw(vma, - ctx, - engine, + err = igt_gpu_fill_dw(ce, vma, (dw * real_page_count(obj)) << PAGE_SHIFT | (dw * sizeof(u32)), real_page_count(obj), @@ -305,22 +520,21 @@ static int file_add_object(struct drm_file *file, } static struct drm_i915_gem_object * -create_test_object(struct i915_gem_context *ctx, +create_test_object(struct i915_address_space *vm, struct drm_file *file, struct list_head *objects) { struct drm_i915_gem_object *obj; - struct i915_address_space *vm = ctx->vm ?: &ctx->i915->ggtt.vm; u64 size; int err; /* Keep in GEM's good graces */ - i915_retire_requests(ctx->i915); + intel_gt_retire_requests(vm->gt); size = min(vm->total / 2, 1024ull * DW_PER_PAGE * PAGE_SIZE); size = round_down(size, DW_PER_PAGE * PAGE_SIZE); - obj = huge_gem_object(ctx->i915, DW_PER_PAGE * PAGE_SIZE, size); + obj = huge_gem_object(vm->i915, DW_PER_PAGE * PAGE_SIZE, size); if (IS_ERR(obj)) return obj; @@ -348,11 +562,49 @@ static unsigned long max_dwords(struct drm_i915_gem_object *obj) return npages / DW_PER_PAGE; } +static void throttle_release(struct i915_request **q, int count) +{ + int i; + + for (i = 0; i < count; i++) { + if (IS_ERR_OR_NULL(q[i])) + continue; + + i915_request_put(fetch_and_zero(&q[i])); + } +} + +static int throttle(struct intel_context *ce, + struct i915_request **q, int count) +{ + int i; + + if (!IS_ERR_OR_NULL(q[0])) { + if (i915_request_wait(q[0], + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT) < 0) + return -EINTR; + + i915_request_put(q[0]); + } + + for (i = 0; i < count - 1; i++) + q[i] = q[i + 1]; + + q[i] = intel_context_create_request(ce); + if (IS_ERR(q[i])) + return PTR_ERR(q[i]); + + i915_request_get(q[i]); + i915_request_add(q[i]); + + return 0; +} + static int igt_ctx_exec(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; - enum intel_engine_id id; int err = -ENODEV; /* @@ -364,9 +616,10 @@ static int igt_ctx_exec(void *arg) if (!DRIVER_CAPS(i915)->has_logical_contexts) return 0; - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { struct drm_i915_gem_object *obj = NULL; unsigned long ncontexts, ndwords, dw; + struct i915_request *tq[5] = {}; struct igt_live_test t; struct drm_file *file; IGT_TIMEOUT(end_time); @@ -382,39 +635,53 @@ static int igt_ctx_exec(void *arg) if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&i915->drm.struct_mutex); - err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) - goto out_unlock; + goto out_file; ncontexts = 0; ndwords = 0; dw = 0; while (!time_after(jiffies, end_time)) { struct i915_gem_context *ctx; + struct intel_context *ce; - ctx = live_context(i915, file); + ctx = kernel_context(i915); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); - goto out_unlock; + goto out_file; } + ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); + GEM_BUG_ON(IS_ERR(ce)); + if (!obj) { - obj = create_test_object(ctx, file, &objects); + obj = create_test_object(ce->vm, file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); - goto out_unlock; + intel_context_put(ce); + kernel_context_close(ctx); + goto out_file; } } - err = gpu_fill(obj, ctx, engine, dw); + err = gpu_fill(ce, obj, dw); if (err) { - pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), - engine->name, ctx->hw_id, - yesno(!!ctx->vm), err); - goto out_unlock; + engine->name, + yesno(!!rcu_access_pointer(ctx->vm)), + err); + intel_context_put(ce); + kernel_context_close(ctx); + goto out_file; + } + + err = throttle(ce, tq, ARRAY_SIZE(tq)); + if (err) { + intel_context_put(ce); + kernel_context_close(ctx); + goto out_file; } if (++dw == max_dwords(obj)) { @@ -424,6 +691,9 @@ static int igt_ctx_exec(void *arg) ndwords++; ncontexts++; + + intel_context_put(ce); + kernel_context_close(ctx); } pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", @@ -441,10 +711,10 @@ static int igt_ctx_exec(void *arg) dw += rem; } -out_unlock: +out_file: + throttle_release(tq, ARRAY_SIZE(tq)); if (igt_live_test_end(&t)) err = -EIO; - mutex_unlock(&i915->drm.struct_mutex); mock_file_free(i915, file); if (err) @@ -459,9 +729,9 @@ out_unlock: static int igt_shared_ctx_exec(void *arg) { struct drm_i915_private *i915 = arg; + struct i915_request *tq[5] = {}; struct i915_gem_context *parent; struct intel_engine_cs *engine; - enum intel_engine_id id; struct igt_live_test t; struct drm_file *file; int err = 0; @@ -478,24 +748,22 @@ static int igt_shared_ctx_exec(void *arg) if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&i915->drm.struct_mutex); - parent = live_context(i915, file); if (IS_ERR(parent)) { err = PTR_ERR(parent); - goto out_unlock; + goto out_file; } if (!parent->vm) { /* not full-ppgtt; nothing to share */ err = 0; - goto out_unlock; + goto out_file; } err = igt_live_test_begin(&t, i915, __func__, ""); if (err) - goto out_unlock; + goto out_file; - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { unsigned long ncontexts, ndwords, dw; struct drm_i915_gem_object *obj = NULL; IGT_TIMEOUT(end_time); @@ -509,6 +777,7 @@ static int igt_shared_ctx_exec(void *arg) ncontexts = 0; while (!time_after(jiffies, end_time)) { struct i915_gem_context *ctx; + struct intel_context *ce; ctx = kernel_context(i915); if (IS_ERR(ctx)) { @@ -516,23 +785,38 @@ static int igt_shared_ctx_exec(void *arg) goto out_test; } + mutex_lock(&ctx->mutex); __assign_ppgtt(ctx, parent->vm); + mutex_unlock(&ctx->mutex); + + ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); + GEM_BUG_ON(IS_ERR(ce)); if (!obj) { - obj = create_test_object(parent, file, &objects); + obj = create_test_object(parent->vm, file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); + intel_context_put(ce); kernel_context_close(ctx); goto out_test; } } - err = gpu_fill(obj, ctx, engine, dw); + err = gpu_fill(ce, obj, dw); if (err) { - pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), - engine->name, ctx->hw_id, - yesno(!!ctx->vm), err); + engine->name, + yesno(!!rcu_access_pointer(ctx->vm)), + err); + intel_context_put(ce); + kernel_context_close(ctx); + goto out_test; + } + + err = throttle(ce, tq, ARRAY_SIZE(tq)); + if (err) { + intel_context_put(ce); kernel_context_close(ctx); goto out_test; } @@ -545,6 +829,7 @@ static int igt_shared_ctx_exec(void *arg) ndwords++; ncontexts++; + intel_context_put(ce); kernel_context_close(ctx); } pr_info("Submitted %lu contexts to %s, filling %lu dwords\n", @@ -562,16 +847,13 @@ static int igt_shared_ctx_exec(void *arg) dw += rem; } - mutex_unlock(&i915->drm.struct_mutex); i915_gem_drain_freed_objects(i915); - mutex_lock(&i915->drm.struct_mutex); } out_test: + throttle_release(tq, ARRAY_SIZE(tq)); if (igt_live_test_end(&t)) err = -EIO; -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); - +out_file: mock_file_free(i915, file); return err; } @@ -604,6 +886,8 @@ static struct i915_vma *rpcs_query_batch(struct i915_vma *vma) __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); + intel_gt_chipset_flush(vma->vm->gt); + vma = i915_vma_instance(obj, vma->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); @@ -681,10 +965,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj, if (err) goto skip_request; - i915_vma_unpin(batch); - i915_vma_close(batch); - i915_vma_put(batch); - + i915_vma_unpin_and_release(&batch, 0); i915_vma_unpin(vma); *rq_out = i915_request_get(rq); @@ -698,8 +979,7 @@ skip_request: err_request: i915_request_add(rq); err_batch: - i915_vma_unpin(batch); - i915_vma_put(batch); + i915_vma_unpin_and_release(&batch, 0); err_vma: i915_vma_unpin(vma); @@ -860,8 +1140,8 @@ out: igt_spinner_end(spin); if ((flags & TEST_IDLE) && ret == 0) { - ret = i915_gem_wait_for_idle(ce->engine->i915, - 0, MAX_SCHEDULE_TIMEOUT); + ret = intel_gt_wait_for_idle(ce->engine->gt, + MAX_SCHEDULE_TIMEOUT); if (ret) return ret; @@ -887,7 +1167,7 @@ __sseu_test(const char *name, if (ret) return ret; - ret = __intel_context_reconfigure_sseu(ce, sseu); + ret = intel_context_reconfigure_sseu(ce, sseu); if (ret) goto out_spin; @@ -908,106 +1188,97 @@ __igt_ctx_sseu(struct drm_i915_private *i915, const char *name, unsigned int flags) { - struct intel_engine_cs *engine = i915->engine[RCS0]; struct drm_i915_gem_object *obj; - struct i915_gem_context *ctx; - struct intel_context *ce; - struct intel_sseu pg_sseu; - struct drm_file *file; - int ret; - - if (INTEL_GEN(i915) < 9 || !engine) - return 0; - - if (!RUNTIME_INFO(i915)->sseu.has_slice_pg) - return 0; + int inst = 0; + int ret = 0; - if (hweight32(engine->sseu.slice_mask) < 2) + if (INTEL_GEN(i915) < 9 || !RUNTIME_INFO(i915)->sseu.has_slice_pg) return 0; - /* - * Gen11 VME friendly power-gated configuration with half enabled - * sub-slices. - */ - pg_sseu = engine->sseu; - pg_sseu.slice_mask = 1; - pg_sseu.subslice_mask = - ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2)); - - pr_info("SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n", - name, flags, hweight32(engine->sseu.slice_mask), - hweight32(pg_sseu.slice_mask)); - - file = mock_file(i915); - if (IS_ERR(file)) - return PTR_ERR(file); - if (flags & TEST_RESET) igt_global_reset_lock(&i915->gt); - mutex_lock(&i915->drm.struct_mutex); - - ctx = live_context(i915, file); - if (IS_ERR(ctx)) { - ret = PTR_ERR(ctx); - goto out_unlock; - } - i915_gem_context_clear_bannable(ctx); /* to reset and beyond! 
*/ - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); if (IS_ERR(obj)) { ret = PTR_ERR(obj); goto out_unlock; } - ce = i915_gem_context_get_engine(ctx, RCS0); - if (IS_ERR(ce)) { - ret = PTR_ERR(ce); - goto out_put; - } + do { + struct intel_engine_cs *engine; + struct intel_context *ce; + struct intel_sseu pg_sseu; - ret = intel_context_pin(ce); - if (ret) - goto out_context; + engine = intel_engine_lookup_user(i915, + I915_ENGINE_CLASS_RENDER, + inst++); + if (!engine) + break; - /* First set the default mask. */ - ret = __sseu_test(name, flags, ce, obj, engine->sseu); - if (ret) - goto out_fail; + if (hweight32(engine->sseu.slice_mask) < 2) + continue; - /* Then set a power-gated configuration. */ - ret = __sseu_test(name, flags, ce, obj, pg_sseu); - if (ret) - goto out_fail; + /* + * Gen11 VME friendly power-gated configuration with + * half enabled sub-slices. + */ + pg_sseu = engine->sseu; + pg_sseu.slice_mask = 1; + pg_sseu.subslice_mask = + ~(~0 << (hweight32(engine->sseu.subslice_mask) / 2)); + + pr_info("%s: SSEU subtest '%s', flags=%x, def_slices=%u, pg_slices=%u\n", + engine->name, name, flags, + hweight32(engine->sseu.slice_mask), + hweight32(pg_sseu.slice_mask)); + + ce = intel_context_create(engine->kernel_context->gem_context, + engine); + if (IS_ERR(ce)) { + ret = PTR_ERR(ce); + goto out_put; + } - /* Back to defaults. */ - ret = __sseu_test(name, flags, ce, obj, engine->sseu); - if (ret) - goto out_fail; + ret = intel_context_pin(ce); + if (ret) + goto out_ce; - /* One last power-gated configuration for the road. */ - ret = __sseu_test(name, flags, ce, obj, pg_sseu); - if (ret) - goto out_fail; + /* First set the default mask. */ + ret = __sseu_test(name, flags, ce, obj, engine->sseu); + if (ret) + goto out_unpin; + + /* Then set a power-gated configuration. */ + ret = __sseu_test(name, flags, ce, obj, pg_sseu); + if (ret) + goto out_unpin; + + /* Back to defaults. */ + ret = __sseu_test(name, flags, ce, obj, engine->sseu); + if (ret) + goto out_unpin; -out_fail: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + /* One last power-gated configuration for the road. 
*/ + ret = __sseu_test(name, flags, ce, obj, pg_sseu); + if (ret) + goto out_unpin; + +out_unpin: + intel_context_unpin(ce); +out_ce: + intel_context_put(ce); + } while (!ret); + + if (igt_flush_test(i915)) ret = -EIO; - intel_context_unpin(ce); -out_context: - intel_context_put(ce); out_put: i915_gem_object_put(obj); out_unlock: - mutex_unlock(&i915->drm.struct_mutex); - if (flags & TEST_RESET) igt_global_reset_unlock(&i915->gt); - mock_file_free(i915, file); - if (ret) pr_err("%s: Failed with %d!\n", name, ret); @@ -1041,6 +1312,7 @@ static int igt_ctx_readonly(void *arg) { struct drm_i915_private *i915 = arg; struct drm_i915_gem_object *obj = NULL; + struct i915_request *tq[5] = {}; struct i915_address_space *vm; struct i915_gem_context *ctx; unsigned long idx, ndwords, dw; @@ -1061,52 +1333,63 @@ static int igt_ctx_readonly(void *arg) if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&i915->drm.struct_mutex); - err = igt_live_test_begin(&t, i915, __func__, ""); if (err) - goto out_unlock; + goto out_file; ctx = live_context(i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); - goto out_unlock; + goto out_file; } - vm = ctx->vm ?: &i915->ggtt.alias->vm; + rcu_read_lock(); + vm = rcu_dereference(ctx->vm) ?: &i915->ggtt.alias->vm; if (!vm || !vm->has_read_only) { + rcu_read_unlock(); err = 0; - goto out_unlock; + goto out_file; } + rcu_read_unlock(); ndwords = 0; dw = 0; while (!time_after(jiffies, end_time)) { - struct intel_engine_cs *engine; - unsigned int id; + struct i915_gem_engines_iter it; + struct intel_context *ce; - for_each_engine(engine, i915, id) { - if (!intel_engine_can_store_dword(engine)) + for_each_gem_engine(ce, + i915_gem_context_lock_engines(ctx), it) { + if (!intel_engine_can_store_dword(ce->engine)) continue; if (!obj) { - obj = create_test_object(ctx, file, &objects); + obj = create_test_object(ce->vm, file, &objects); if (IS_ERR(obj)) { err = PTR_ERR(obj); - goto out_unlock; + i915_gem_context_unlock_engines(ctx); + goto out_file; } if (prandom_u32_state(&prng) & 1) i915_gem_object_set_readonly(obj); } - err = gpu_fill(obj, ctx, engine, dw); + err = gpu_fill(ce, obj, dw); if (err) { - pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) in ctx %u [full-ppgtt? %s], err=%d\n", + pr_err("Failed to fill dword %lu [%lu/%lu] with gpu (%s) [full-ppgtt? 
%s], err=%d\n", ndwords, dw, max_dwords(obj), - engine->name, ctx->hw_id, - yesno(!!ctx->vm), err); - goto out_unlock; + ce->engine->name, + yesno(!!rcu_access_pointer(ctx->vm)), + err); + i915_gem_context_unlock_engines(ctx); + goto out_file; + } + + err = throttle(ce, tq, ARRAY_SIZE(tq)); + if (err) { + i915_gem_context_unlock_engines(ctx); + goto out_file; } if (++dw == max_dwords(obj)) { @@ -1115,6 +1398,7 @@ static int igt_ctx_readonly(void *arg) } ndwords++; } + i915_gem_context_unlock_engines(ctx); } pr_info("Submitted %lu dwords (across %u engines)\n", ndwords, RUNTIME_INFO(i915)->num_engines); @@ -1137,19 +1421,19 @@ static int igt_ctx_readonly(void *arg) dw += rem; } -out_unlock: +out_file: + throttle_release(tq, ARRAY_SIZE(tq)); if (igt_live_test_end(&t)) err = -EIO; - mutex_unlock(&i915->drm.struct_mutex); mock_file_free(i915, file); return err; } -static int check_scratch(struct i915_gem_context *ctx, u64 offset) +static int check_scratch(struct i915_address_space *vm, u64 offset) { struct drm_mm_node *node = - __drm_mm_interval_first(&ctx->vm->mm, + __drm_mm_interval_first(&vm->mm, offset, offset + sizeof(u32) - 1); if (!node || node->start > offset) return 0; @@ -1167,6 +1451,7 @@ static int write_to_scratch(struct i915_gem_context *ctx, { struct drm_i915_private *i915 = ctx->i915; struct drm_i915_gem_object *obj; + struct i915_address_space *vm; struct i915_request *rq; struct i915_vma *vma; u32 *cmd; @@ -1197,17 +1482,20 @@ static int write_to_scratch(struct i915_gem_context *ctx, __i915_gem_object_flush_map(obj, 0, 64); i915_gem_object_unpin_map(obj); - vma = i915_vma_instance(obj, ctx->vm, NULL); + intel_gt_chipset_flush(engine->gt); + + vm = i915_gem_context_get_vm_rcu(ctx); + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); - goto err; + goto err_vm; } err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); if (err) - goto err; + goto err_vm; - err = check_scratch(ctx, offset); + err = check_scratch(vm, offset); if (err) goto err_unpin; @@ -1229,12 +1517,11 @@ static int write_to_scratch(struct i915_gem_context *ctx, if (err) goto skip_request; - i915_vma_unpin(vma); - i915_vma_close(vma); - i915_vma_put(vma); + i915_vma_unpin_and_release(&vma, 0); i915_request_add(rq); + i915_vm_put(vm); return 0; skip_request: @@ -1243,6 +1530,8 @@ err_request: i915_request_add(rq); err_unpin: i915_vma_unpin(vma); +err_vm: + i915_vm_put(vm); err: i915_gem_object_put(obj); return err; @@ -1254,6 +1543,7 @@ static int read_from_scratch(struct i915_gem_context *ctx, { struct drm_i915_private *i915 = ctx->i915; struct drm_i915_gem_object *obj; + struct i915_address_space *vm; const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! 
*/ const u32 result = 0x100; struct i915_request *rq; @@ -1296,17 +1586,20 @@ static int read_from_scratch(struct i915_gem_context *ctx, i915_gem_object_flush_map(obj); i915_gem_object_unpin_map(obj); - vma = i915_vma_instance(obj, ctx->vm, NULL); + intel_gt_chipset_flush(engine->gt); + + vm = i915_gem_context_get_vm_rcu(ctx); + vma = i915_vma_instance(obj, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); - goto err; + goto err_vm; } err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED); if (err) - goto err; + goto err_vm; - err = check_scratch(ctx, offset); + err = check_scratch(vm, offset); if (err) goto err_unpin; @@ -1337,12 +1630,12 @@ static int read_from_scratch(struct i915_gem_context *ctx, err = i915_gem_object_set_to_cpu_domain(obj, false); i915_gem_object_unlock(obj); if (err) - goto err; + goto err_vm; cmd = i915_gem_object_pin_map(obj, I915_MAP_WB); if (IS_ERR(cmd)) { err = PTR_ERR(cmd); - goto err; + goto err_vm; } *value = cmd[result / sizeof(*cmd)]; @@ -1357,6 +1650,8 @@ err_request: i915_request_add(rq); err_unpin: i915_vma_unpin(vma); +err_vm: + i915_vm_put(vm); err: i915_gem_object_put(obj); return err; @@ -1371,7 +1666,6 @@ static int igt_vm_isolation(void *arg) struct drm_file *file; I915_RND_STATE(prng); unsigned long count; - unsigned int id; u64 vm_total; int err; @@ -1387,34 +1681,32 @@ static int igt_vm_isolation(void *arg) if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&i915->drm.struct_mutex); - err = igt_live_test_begin(&t, i915, __func__, ""); if (err) - goto out_unlock; + goto out_file; ctx_a = live_context(i915, file); if (IS_ERR(ctx_a)) { err = PTR_ERR(ctx_a); - goto out_unlock; + goto out_file; } ctx_b = live_context(i915, file); if (IS_ERR(ctx_b)) { err = PTR_ERR(ctx_b); - goto out_unlock; + goto out_file; } /* We can only test vm isolation, if the vm are distinct */ if (ctx_a->vm == ctx_b->vm) - goto out_unlock; + goto out_file; vm_total = ctx_a->vm->total; GEM_BUG_ON(ctx_b->vm->total != vm_total); vm_total -= I915_GTT_PAGE_SIZE; count = 0; - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { IGT_TIMEOUT(end_time); unsigned long this = 0; @@ -1436,7 +1728,7 @@ static int igt_vm_isolation(void *arg) err = read_from_scratch(ctx_b, engine, offset, &value); if (err) - goto out_unlock; + goto out_file; if (value) { pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n", @@ -1445,7 +1737,7 @@ static int igt_vm_isolation(void *arg) lower_32_bits(offset), this); err = -EINVAL; - goto out_unlock; + goto out_file; } this++; @@ -1455,30 +1747,13 @@ static int igt_vm_isolation(void *arg) pr_info("Checked %lu scratch offsets across %d engines\n", count, RUNTIME_INFO(i915)->num_engines); -out_unlock: +out_file: if (igt_live_test_end(&t)) err = -EIO; - mutex_unlock(&i915->drm.struct_mutex); - mock_file_free(i915, file); return err; } -static __maybe_unused const char * -__engine_name(struct drm_i915_private *i915, intel_engine_mask_t engines) -{ - struct intel_engine_cs *engine; - intel_engine_mask_t tmp; - - if (engines == ALL_ENGINES) - return "all"; - - for_each_engine_masked(engine, i915, engines, tmp) - return engine->name; - - return "none"; -} - static bool skip_unused_engines(struct intel_context *ce, void *data) { return !ce->state; @@ -1506,13 +1781,9 @@ static int mock_context_barrier(void *arg) * a request; useful for retiring old state after loading new. 
*/ - mutex_lock(&i915->drm.struct_mutex); - ctx = mock_context(i915, "mock"); - if (!ctx) { - err = -ENOMEM; - goto unlock; - } + if (!ctx) + return -ENOMEM; counter = 0; err = context_barrier_task(ctx, 0, @@ -1585,8 +1856,6 @@ static int mock_context_barrier(void *arg) out: mock_context_close(ctx); -unlock: - mutex_unlock(&i915->drm.struct_mutex); return err; #undef pr_fmt #define pr_fmt(x) x @@ -1614,6 +1883,7 @@ int i915_gem_context_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_nop_switch), + SUBTEST(live_parallel_switch), SUBTEST(igt_ctx_exec), SUBTEST(igt_ctx_readonly), SUBTEST(igt_ctx_sseu), diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 1d27babff0ce..29b2077b73d2 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -10,6 +10,7 @@ #include "gt/intel_gt_pm.h" #include "huge_gem_object.h" #include "i915_selftest.h" +#include "selftests/i915_random.h" #include "selftests/igt_flush_test.h" struct tile { @@ -76,18 +77,103 @@ static u64 tiled_offset(const struct tile *tile, u64 v) static int check_partial_mapping(struct drm_i915_gem_object *obj, const struct tile *tile, - unsigned long end_time) + struct rnd_state *prng) { - const unsigned int nreal = obj->scratch / PAGE_SIZE; const unsigned long npages = obj->base.size / PAGE_SIZE; + struct i915_ggtt_view view; struct i915_vma *vma; unsigned long page; + u32 __iomem *io; + struct page *p; + unsigned int n; + u64 offset; + u32 *cpu; int err; - if (igt_timeout(end_time, - "%s: timed out before tiling=%d stride=%d\n", - __func__, tile->tiling, tile->stride)) - return -EINTR; + err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride); + if (err) { + pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n", + tile->tiling, tile->stride, err); + return err; + } + + GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); + GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride); + + i915_gem_object_lock(obj); + err = i915_gem_object_set_to_gtt_domain(obj, true); + i915_gem_object_unlock(obj); + if (err) { + pr_err("Failed to flush to GTT write domain; err=%d\n", err); + return err; + } + + page = i915_prandom_u32_max_state(npages, prng); + view = compute_partial_view(obj, page, MIN_CHUNK_PAGES); + + vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); + if (IS_ERR(vma)) { + pr_err("Failed to pin partial view: offset=%lu; err=%d\n", + page, (int)PTR_ERR(vma)); + return PTR_ERR(vma); + } + + n = page - view.partial.offset; + GEM_BUG_ON(n >= view.partial.size); + + io = i915_vma_pin_iomap(vma); + i915_vma_unpin(vma); + if (IS_ERR(io)) { + pr_err("Failed to iomap partial view: offset=%lu; err=%d\n", + page, (int)PTR_ERR(io)); + err = PTR_ERR(io); + goto out; + } + + iowrite32(page, io + n * PAGE_SIZE / sizeof(*io)); + i915_vma_unpin_iomap(vma); + + offset = tiled_offset(tile, page << PAGE_SHIFT); + if (offset >= obj->base.size) + goto out; + + intel_gt_flush_ggtt_writes(&to_i915(obj->base.dev)->gt); + + p = i915_gem_object_get_page(obj, offset >> PAGE_SHIFT); + cpu = kmap(p) + offset_in_page(offset); + drm_clflush_virt_range(cpu, sizeof(*cpu)); + if (*cpu != (u32)page) { + pr_err("Partial view for %lu [%u] (offset=%llu, size=%u [%llu, row size %u], fence=%d, tiling=%d, stride=%d) misalignment, expected write to page (%llu + %u [0x%llx]) of 0x%x, found 0x%x\n", + page, n, + view.partial.offset, + view.partial.size, + 
vma->size >> PAGE_SHIFT, + tile->tiling ? tile_row_pages(obj) : 0, + vma->fence ? vma->fence->id : -1, tile->tiling, tile->stride, + offset >> PAGE_SHIFT, + (unsigned int)offset_in_page(offset), + offset, + (u32)page, *cpu); + err = -EINVAL; + } + *cpu = 0; + drm_clflush_virt_range(cpu, sizeof(*cpu)); + kunmap(p); + +out: + i915_vma_destroy(vma); + return err; +} + +static int check_partial_mappings(struct drm_i915_gem_object *obj, + const struct tile *tile, + unsigned long end_time) +{ + const unsigned int nreal = obj->scratch / PAGE_SIZE; + const unsigned long npages = obj->base.size / PAGE_SIZE; + struct i915_vma *vma; + unsigned long page; + int err; err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride); if (err) { @@ -170,11 +256,42 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, return err; i915_vma_destroy(vma); + + if (igt_timeout(end_time, + "%s: timed out after tiling=%d stride=%d\n", + __func__, tile->tiling, tile->stride)) + return -EINTR; } return 0; } +static unsigned int +setup_tile_size(struct tile *tile, struct drm_i915_private *i915) +{ + if (INTEL_GEN(i915) <= 2) { + tile->height = 16; + tile->width = 128; + tile->size = 11; + } else if (tile->tiling == I915_TILING_Y && + HAS_128_BYTE_Y_TILING(i915)) { + tile->height = 32; + tile->width = 128; + tile->size = 12; + } else { + tile->height = 8; + tile->width = 512; + tile->size = 12; + } + + if (INTEL_GEN(i915) < 4) + return 8192 / tile->width; + else if (INTEL_GEN(i915) < 7) + return 128 * I965_FENCE_MAX_PITCH_VAL / tile->width; + else + return 128 * GEN7_FENCE_MAX_PITCH_VAL / tile->width; +} + static int igt_partial_tiling(void *arg) { const unsigned int nreal = 1 << 12; /* largest tile row x2 */ @@ -184,6 +301,9 @@ static int igt_partial_tiling(void *arg) int tiling; int err; + if (!i915_ggtt_has_aperture(&i915->ggtt)) + return 0; + /* We want to check the page mapping and fencing of a large object * mmapped through the GTT. The object we create is larger than can * possibly be mmaped as a whole, and so we must use partial GGTT vma. 
@@ -205,7 +325,6 @@ static int igt_partial_tiling(void *arg) goto out; } - mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (1) { @@ -219,7 +338,7 @@ static int igt_partial_tiling(void *arg) tile.swizzle = I915_BIT_6_SWIZZLE_NONE; tile.tiling = I915_TILING_NONE; - err = check_partial_mapping(obj, &tile, end); + err = check_partial_mappings(obj, &tile, end); if (err && err != -EINTR) goto out_unlock; } @@ -241,10 +360,10 @@ static int igt_partial_tiling(void *arg) tile.tiling = tiling; switch (tiling) { case I915_TILING_X: - tile.swizzle = i915->mm.bit_6_swizzle_x; + tile.swizzle = i915->ggtt.bit_6_swizzle_x; break; case I915_TILING_Y: - tile.swizzle = i915->mm.bit_6_swizzle_y; + tile.swizzle = i915->ggtt.bit_6_swizzle_y; break; } @@ -253,31 +372,11 @@ static int igt_partial_tiling(void *arg) tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) continue; - if (INTEL_GEN(i915) <= 2) { - tile.height = 16; - tile.width = 128; - tile.size = 11; - } else if (tile.tiling == I915_TILING_Y && - HAS_128_BYTE_Y_TILING(i915)) { - tile.height = 32; - tile.width = 128; - tile.size = 12; - } else { - tile.height = 8; - tile.width = 512; - tile.size = 12; - } - - if (INTEL_GEN(i915) < 4) - max_pitch = 8192 / tile.width; - else if (INTEL_GEN(i915) < 7) - max_pitch = 128 * I965_FENCE_MAX_PITCH_VAL / tile.width; - else - max_pitch = 128 * GEN7_FENCE_MAX_PITCH_VAL / tile.width; + max_pitch = setup_tile_size(&tile, i915); for (pitch = max_pitch; pitch; pitch >>= 1) { tile.stride = tile.width * pitch; - err = check_partial_mapping(obj, &tile, end); + err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) goto next_tiling; if (err) @@ -285,7 +384,7 @@ static int igt_partial_tiling(void *arg) if (pitch > 2 && INTEL_GEN(i915) >= 4) { tile.stride = tile.width * (pitch - 1); - err = check_partial_mapping(obj, &tile, end); + err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) goto next_tiling; if (err) @@ -294,7 +393,7 @@ static int igt_partial_tiling(void *arg) if (pitch < max_pitch && INTEL_GEN(i915) >= 4) { tile.stride = tile.width * (pitch + 1); - err = check_partial_mapping(obj, &tile, end); + err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) goto next_tiling; if (err) @@ -305,7 +404,7 @@ static int igt_partial_tiling(void *arg) if (INTEL_GEN(i915) >= 4) { for_each_prime_number(pitch, max_pitch) { tile.stride = tile.width * pitch; - err = check_partial_mapping(obj, &tile, end); + err = check_partial_mappings(obj, &tile, end); if (err == -EINTR) goto next_tiling; if (err) @@ -318,7 +417,100 @@ next_tiling: ; out_unlock: intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); + i915_gem_object_unpin_pages(obj); +out: + i915_gem_object_put(obj); + return err; +} + +static int igt_smoke_tiling(void *arg) +{ + const unsigned int nreal = 1 << 12; /* largest tile row x2 */ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + intel_wakeref_t wakeref; + I915_RND_STATE(prng); + unsigned long count; + IGT_TIMEOUT(end); + int err; + + if (!i915_ggtt_has_aperture(&i915->ggtt)) + return 0; + + /* + * igt_partial_tiling() does an exhaustive check of partial tiling + * chunking, but will undoubtedly run out of time. Here, we do a + * randomised search and hope over many runs of 1s with different + * seeds we will do a thorough check. + * + * Remember to look at the st_seed if we see a flip-flop in BAT! 
+ */ + + if (i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) + return 0; + + obj = huge_gem_object(i915, + nreal << PAGE_SHIFT, + (1 + next_prime_number(i915->ggtt.vm.total >> PAGE_SHIFT)) << PAGE_SHIFT); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = i915_gem_object_pin_pages(obj); + if (err) { + pr_err("Failed to allocate %u pages (%lu total), err=%d\n", + nreal, obj->base.size / PAGE_SIZE, err); + goto out; + } + + wakeref = intel_runtime_pm_get(&i915->runtime_pm); + + count = 0; + do { + struct tile tile; + + tile.tiling = + i915_prandom_u32_max_state(I915_TILING_Y + 1, &prng); + switch (tile.tiling) { + case I915_TILING_NONE: + tile.height = 1; + tile.width = 1; + tile.size = 0; + tile.stride = 0; + tile.swizzle = I915_BIT_6_SWIZZLE_NONE; + break; + + case I915_TILING_X: + tile.swizzle = i915->ggtt.bit_6_swizzle_x; + break; + case I915_TILING_Y: + tile.swizzle = i915->ggtt.bit_6_swizzle_y; + break; + } + + if (tile.swizzle == I915_BIT_6_SWIZZLE_9_17 || + tile.swizzle == I915_BIT_6_SWIZZLE_9_10_17) + continue; + + if (tile.tiling != I915_TILING_NONE) { + unsigned int max_pitch = setup_tile_size(&tile, i915); + + tile.stride = + i915_prandom_u32_max_state(max_pitch, &prng); + tile.stride = (1 + tile.stride) * tile.width; + if (INTEL_GEN(i915) < 4) + tile.stride = rounddown_pow_of_two(tile.stride); + } + + err = check_partial_mapping(obj, &tile, &prng); + if (err) + break; + + count++; + } while (!__igt_timeout(end, NULL)); + + pr_info("%s: Completed %lu trials\n", __func__, count); + + intel_runtime_pm_put(&i915->runtime_pm, wakeref); i915_gem_object_unpin_pages(obj); out: i915_gem_object_put(obj); @@ -329,20 +521,19 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct intel_engine_cs *engine; - enum intel_engine_id id; - struct i915_vma *vma; - int err; - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); - if (IS_ERR(vma)) - return PTR_ERR(vma); + for_each_uabi_engine(engine, i915) { + struct i915_request *rq; + struct i915_vma *vma; + int err; - err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) - return err; + vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); + if (IS_ERR(vma)) + return PTR_ERR(vma); - for_each_engine(engine, i915, id) { - struct i915_request *rq; + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + return err; rq = i915_request_create(engine->kernel_context); if (IS_ERR(rq)) { @@ -358,12 +549,13 @@ static int make_obj_busy(struct drm_i915_gem_object *obj) i915_vma_unlock(vma); i915_request_add(rq); + i915_vma_unpin(vma); + if (err) + return err; } - i915_vma_unpin(vma); i915_gem_object_put(obj); /* leave it only alive via its active ref */ - - return err; + return 0; } static bool assert_mmap_offset(struct drm_i915_private *i915, @@ -386,21 +578,14 @@ static bool assert_mmap_offset(struct drm_i915_private *i915, static void disable_retire_worker(struct drm_i915_private *i915) { i915_gem_driver_unregister__shrinker(i915); - intel_gt_pm_get(&i915->gt); - - cancel_delayed_work_sync(&i915->gem.retire_work); - flush_work(&i915->gem.idle_work); + cancel_delayed_work_sync(&i915->gt.requests.retire_work); } static void restore_retire_worker(struct drm_i915_private *i915) { + igt_flush_test(i915); intel_gt_pm_put(&i915->gt); - - mutex_lock(&i915->drm.struct_mutex); - igt_flush_test(i915, I915_WAIT_LOCKED); - mutex_unlock(&i915->drm.struct_mutex); - i915_gem_driver_register__shrinker(i915); } @@ -490,9 +675,7 @@ static int igt_mmap_offset_exhaustion(void *arg) goto out; } - 
mutex_lock(&i915->drm.struct_mutex); err = make_obj_busy(obj); - mutex_unlock(&i915->drm.struct_mutex); if (err) { pr_err("[loop %d] Failed to busy the object\n", loop); goto err_obj; @@ -515,6 +698,7 @@ int i915_gem_mman_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_partial_tiling), + SUBTEST(igt_smoke_tiling), SUBTEST(igt_mmap_offset_exhaustion), }; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c index c21d747e7d05..e8132aca0bb6 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c @@ -3,40 +3,241 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/sort.h> + #include "gt/intel_gt.h" +#include "gt/intel_engine_user.h" #include "i915_selftest.h" +#include "gem/i915_gem_context.h" #include "selftests/igt_flush_test.h" +#include "selftests/i915_random.h" #include "selftests/mock_drm.h" #include "huge_gem_object.h" #include "mock_context.h" -static int igt_fill_blt(void *arg) +static int wrap_ktime_compare(const void *A, const void *B) +{ + const ktime_t *a = A, *b = B; + + return ktime_compare(*a, *b); +} + +static int __perf_fill_blt(struct drm_i915_gem_object *obj) +{ + struct drm_i915_private *i915 = to_i915(obj->base.dev); + int inst = 0; + + do { + struct intel_engine_cs *engine; + ktime_t t[5]; + int pass; + int err; + + engine = intel_engine_lookup_user(i915, + I915_ENGINE_CLASS_COPY, + inst++); + if (!engine) + return 0; + + for (pass = 0; pass < ARRAY_SIZE(t); pass++) { + struct intel_context *ce = engine->kernel_context; + ktime_t t0, t1; + + t0 = ktime_get(); + + err = i915_gem_object_fill_blt(obj, ce, 0); + if (err) + return err; + + err = i915_gem_object_wait(obj, + I915_WAIT_ALL, + MAX_SCHEDULE_TIMEOUT); + if (err) + return err; + + t1 = ktime_get(); + t[pass] = ktime_sub(t1, t0); + } + + sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL); + pr_info("%s: blt %zd KiB fill: %lld MiB/s\n", + engine->name, + obj->base.size >> 10, + div64_u64(mul_u32_u32(4 * obj->base.size, + 1000 * 1000 * 1000), + t[1] + 2 * t[2] + t[3]) >> 20); + } while (1); +} + +static int perf_fill_blt(void *arg) { struct drm_i915_private *i915 = arg; - struct intel_context *ce = i915->engine[BCS0]->kernel_context; - struct drm_i915_gem_object *obj; + static const unsigned long sizes[] = { + SZ_4K, + SZ_64K, + SZ_2M, + SZ_64M + }; + int i; + + for (i = 0; i < ARRAY_SIZE(sizes); i++) { + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_create_internal(i915, sizes[i]); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = __perf_fill_blt(obj); + i915_gem_object_put(obj); + if (err) + return err; + } + + return 0; +} + +static int __perf_copy_blt(struct drm_i915_gem_object *src, + struct drm_i915_gem_object *dst) +{ + struct drm_i915_private *i915 = to_i915(src->base.dev); + int inst = 0; + + do { + struct intel_engine_cs *engine; + ktime_t t[5]; + int pass; + + engine = intel_engine_lookup_user(i915, + I915_ENGINE_CLASS_COPY, + inst++); + if (!engine) + return 0; + + for (pass = 0; pass < ARRAY_SIZE(t); pass++) { + struct intel_context *ce = engine->kernel_context; + ktime_t t0, t1; + int err; + + t0 = ktime_get(); + + err = i915_gem_object_copy_blt(src, dst, ce); + if (err) + return err; + + err = i915_gem_object_wait(dst, + I915_WAIT_ALL, + MAX_SCHEDULE_TIMEOUT); + if (err) + return err; + + t1 = ktime_get(); + t[pass] = ktime_sub(t1, t0); + } + + sort(t, 
ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL); + pr_info("%s: blt %zd KiB copy: %lld MiB/s\n", + engine->name, + src->base.size >> 10, + div64_u64(mul_u32_u32(4 * src->base.size, + 1000 * 1000 * 1000), + t[1] + 2 * t[2] + t[3]) >> 20); + } while (1); +} + +static int perf_copy_blt(void *arg) +{ + struct drm_i915_private *i915 = arg; + static const unsigned long sizes[] = { + SZ_4K, + SZ_64K, + SZ_2M, + SZ_64M + }; + int i; + + for (i = 0; i < ARRAY_SIZE(sizes); i++) { + struct drm_i915_gem_object *src, *dst; + int err; + + src = i915_gem_object_create_internal(i915, sizes[i]); + if (IS_ERR(src)) + return PTR_ERR(src); + + dst = i915_gem_object_create_internal(i915, sizes[i]); + if (IS_ERR(dst)) { + err = PTR_ERR(dst); + goto err_src; + } + + err = __perf_copy_blt(src, dst); + + i915_gem_object_put(dst); +err_src: + i915_gem_object_put(src); + if (err) + return err; + } + + return 0; +} + +struct igt_thread_arg { + struct drm_i915_private *i915; struct rnd_state prng; + unsigned int n_cpus; +}; + +static int igt_fill_blt_thread(void *arg) +{ + struct igt_thread_arg *thread = arg; + struct drm_i915_private *i915 = thread->i915; + struct rnd_state *prng = &thread->prng; + struct drm_i915_gem_object *obj; + struct i915_gem_context *ctx; + struct intel_context *ce; + struct drm_file *file; + unsigned int prio; IGT_TIMEOUT(end); - u32 *vaddr; - int err = 0; + int err; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_file; + } - prandom_seed_state(&prng, i915_selftest.random_seed); + prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng); + ctx->sched.priority = I915_USER_PRIORITY(prio); - /* - * XXX: needs some threads to scale all these tests, also maybe throw - * in submission from higher priority context to see if we are - * preempted for very large objects... - */ + ce = i915_gem_context_get_engine(ctx, BCS0); + GEM_BUG_ON(IS_ERR(ce)); do { const u32 max_block_size = S16_MAX * PAGE_SIZE; - u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng)); - u32 phys_sz = sz % (max_block_size + 1); - u32 val = prandom_u32_state(&prng); + u32 val = prandom_u32_state(prng); + u64 total = ce->vm->total; + u32 phys_sz; + u32 sz; + u32 *vaddr; u32 i; + /* + * If we have a tiny shared address space, like for the GGTT + * then we can't be too greedy. 
+ */ + if (i915_is_ggtt(ce->vm)) + total = div64_u64(total, thread->n_cpus); + + sz = min_t(u64, total >> 4, prandom_u32_state(prng)); + phys_sz = sz % (max_block_size + 1); + sz = round_up(sz, PAGE_SIZE); phys_sz = round_up(phys_sz, PAGE_SIZE); @@ -65,9 +266,7 @@ static int igt_fill_blt(void *arg) if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) obj->cache_dirty = true; - mutex_lock(&i915->drm.struct_mutex); err = i915_gem_object_fill_blt(obj, ce, val); - mutex_unlock(&i915->drm.struct_mutex); if (err) goto err_unpin; @@ -100,28 +299,56 @@ err_flush: if (err == -ENOMEM) err = 0; + intel_context_put(ce); +out_file: + mock_file_free(i915, file); return err; } -static int igt_copy_blt(void *arg) +static int igt_copy_blt_thread(void *arg) { - struct drm_i915_private *i915 = arg; - struct intel_context *ce = i915->engine[BCS0]->kernel_context; + struct igt_thread_arg *thread = arg; + struct drm_i915_private *i915 = thread->i915; + struct rnd_state *prng = &thread->prng; struct drm_i915_gem_object *src, *dst; - struct rnd_state prng; + struct i915_gem_context *ctx; + struct intel_context *ce; + struct drm_file *file; + unsigned int prio; IGT_TIMEOUT(end); - u32 *vaddr; - int err = 0; + int err; + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); - prandom_seed_state(&prng, i915_selftest.random_seed); + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_file; + } + + prio = i915_prandom_u32_max_state(I915_PRIORITY_MAX, prng); + ctx->sched.priority = I915_USER_PRIORITY(prio); + + ce = i915_gem_context_get_engine(ctx, BCS0); + GEM_BUG_ON(IS_ERR(ce)); do { const u32 max_block_size = S16_MAX * PAGE_SIZE; - u32 sz = min_t(u64, ce->vm->total >> 4, prandom_u32_state(&prng)); - u32 phys_sz = sz % (max_block_size + 1); - u32 val = prandom_u32_state(&prng); + u32 val = prandom_u32_state(prng); + u64 total = ce->vm->total; + u32 phys_sz; + u32 sz; + u32 *vaddr; u32 i; + if (i915_is_ggtt(ce->vm)) + total = div64_u64(total, thread->n_cpus); + + sz = min_t(u64, total >> 4, prandom_u32_state(prng)); + phys_sz = sz % (max_block_size + 1); + sz = round_up(sz, PAGE_SIZE); phys_sz = round_up(phys_sz, PAGE_SIZE); @@ -166,9 +393,7 @@ static int igt_copy_blt(void *arg) if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)) dst->cache_dirty = true; - mutex_lock(&i915->drm.struct_mutex); err = i915_gem_object_copy_blt(src, dst, ce); - mutex_unlock(&i915->drm.struct_mutex); if (err) goto err_unpin; @@ -205,12 +430,85 @@ err_flush: if (err == -ENOMEM) err = 0; + intel_context_put(ce); +out_file: + mock_file_free(i915, file); + return err; +} + +static int igt_threaded_blt(struct drm_i915_private *i915, + int (*blt_fn)(void *arg)) +{ + struct igt_thread_arg *thread; + struct task_struct **tsk; + I915_RND_STATE(prng); + unsigned int n_cpus; + unsigned int i; + int err = 0; + + n_cpus = num_online_cpus() + 1; + + tsk = kcalloc(n_cpus, sizeof(struct task_struct *), GFP_KERNEL); + if (!tsk) + return 0; + + thread = kcalloc(n_cpus, sizeof(struct igt_thread_arg), GFP_KERNEL); + if (!thread) { + kfree(tsk); + return 0; + } + + for (i = 0; i < n_cpus; ++i) { + thread[i].i915 = i915; + thread[i].n_cpus = n_cpus; + thread[i].prng = + I915_RND_STATE_INITIALIZER(prandom_u32_state(&prng)); + + tsk[i] = kthread_run(blt_fn, &thread[i], "igt/blt-%d", i); + if (IS_ERR(tsk[i])) { + err = PTR_ERR(tsk[i]); + break; + } + + get_task_struct(tsk[i]); + } + + yield(); /* start all threads before we kthread_stop() */ + + for (i = 0; i < n_cpus; ++i) { + int status; + + 
if (IS_ERR_OR_NULL(tsk[i])) + continue; + + status = kthread_stop(tsk[i]); + if (status && !err) + err = status; + + put_task_struct(tsk[i]); + } + + kfree(tsk); + kfree(thread); + return err; } +static int igt_fill_blt(void *arg) +{ + return igt_threaded_blt(arg, igt_fill_blt_thread); +} + +static int igt_copy_blt(void *arg) +{ + return igt_threaded_blt(arg, igt_copy_blt_thread); +} + int i915_gem_object_blt_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { + SUBTEST(perf_fill_blt), + SUBTEST(perf_copy_blt), SUBTEST(igt_fill_blt), SUBTEST(igt_copy_blt), }; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c index 94a15e3f6db8..34932871b3a5 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_phys.c @@ -25,9 +25,7 @@ static int mock_phys_object(void *arg) goto out; } - mutex_lock(&i915->drm.struct_mutex); err = i915_gem_object_attach_phys(obj, PAGE_SIZE); - mutex_unlock(&i915->drm.struct_mutex); if (err) { pr_err("i915_gem_object_attach_phys failed, err=%d\n", err); goto out_obj; diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c index 57ece53c1075..6718da20f35d 100644 --- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c @@ -9,6 +9,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" +#include "gt/intel_gt.h" #include "i915_vma.h" #include "i915_drv.h" @@ -84,6 +85,8 @@ igt_emit_store_dw(struct i915_vma *vma, *cmd = MI_BATCH_BUFFER_END; i915_gem_object_unpin_map(obj); + intel_gt_chipset_flush(vma->vm->gt); + vma = i915_vma_instance(obj, vma->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); @@ -101,40 +104,35 @@ err: return ERR_PTR(err); } -int igt_gpu_fill_dw(struct i915_vma *vma, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - u64 offset, - unsigned long count, - u32 val) +int igt_gpu_fill_dw(struct intel_context *ce, + struct i915_vma *vma, u64 offset, + unsigned long count, u32 val) { - struct i915_address_space *vm = ctx->vm ?: &engine->gt->ggtt->vm; struct i915_request *rq; struct i915_vma *batch; unsigned int flags; int err; - GEM_BUG_ON(vma->size > vm->total); - GEM_BUG_ON(!intel_engine_can_store_dword(engine)); + GEM_BUG_ON(!intel_engine_can_store_dword(ce->engine)); GEM_BUG_ON(!i915_vma_is_pinned(vma)); batch = igt_emit_store_dw(vma, offset, count, val); if (IS_ERR(batch)) return PTR_ERR(batch); - rq = igt_request_alloc(ctx, engine); + rq = intel_context_create_request(ce); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto err_batch; } flags = 0; - if (INTEL_GEN(vm->i915) <= 5) + if (INTEL_GEN(ce->vm->i915) <= 5) flags |= I915_DISPATCH_SECURE; - err = engine->emit_bb_start(rq, - batch->node.start, batch->node.size, - flags); + err = rq->engine->emit_bb_start(rq, + batch->node.start, batch->node.size, + flags); if (err) goto err_request; @@ -156,9 +154,7 @@ int igt_gpu_fill_dw(struct i915_vma *vma, i915_request_add(rq); - i915_vma_unpin(batch); - i915_vma_close(batch); - i915_vma_put(batch); + i915_vma_unpin_and_release(&batch, 0); return 0; @@ -167,7 +163,6 @@ skip_request: err_request: i915_request_add(rq); err_batch: - i915_vma_unpin(batch); - i915_vma_put(batch); + i915_vma_unpin_and_release(&batch, 0); return err; } diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h 
b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h index 361a7ef866b0..4221cf84d175 100644 --- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h +++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.h @@ -11,9 +11,11 @@ struct i915_request; struct i915_gem_context; -struct intel_engine_cs; struct i915_vma; +struct intel_context; +struct intel_engine_cs; + struct i915_request * igt_request_alloc(struct i915_gem_context *ctx, struct intel_engine_cs *engine); @@ -23,11 +25,8 @@ igt_emit_store_dw(struct i915_vma *vma, unsigned long count, u32 val); -int igt_gpu_fill_dw(struct i915_vma *vma, - struct i915_gem_context *ctx, - struct intel_engine_cs *engine, - u64 offset, - unsigned long count, - u32 val); +int igt_gpu_fill_dw(struct intel_context *ce, + struct i915_vma *vma, u64 offset, + unsigned long count, u32 val); #endif /* __IGT_GEM_UTILS_H__ */ diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c index be8974ccff24..29b8984f0e47 100644 --- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c @@ -13,7 +13,6 @@ mock_context(struct drm_i915_private *i915, { struct i915_gem_context *ctx; struct i915_gem_engines *e; - int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -23,6 +22,8 @@ mock_context(struct drm_i915_private *i915, INIT_LIST_HEAD(&ctx->link); ctx->i915 = i915; + i915_gem_context_set_persistence(ctx); + mutex_init(&ctx->engines_mutex); e = default_engines(ctx); if (IS_ERR(e)) @@ -30,13 +31,8 @@ mock_context(struct drm_i915_private *i915, RCU_INIT_POINTER(ctx->engines, e); INIT_RADIX_TREE(&ctx->handles_vma, GFP_KERNEL); - INIT_LIST_HEAD(&ctx->hw_id_link); mutex_init(&ctx->mutex); - ret = i915_gem_context_pin_hw_id(ctx); - if (ret < 0) - goto err_engines; - if (name) { struct i915_ppgtt *ppgtt; @@ -48,14 +44,15 @@ mock_context(struct drm_i915_private *i915, if (!ppgtt) goto err_put; + mutex_lock(&ctx->mutex); __set_ppgtt(ctx, &ppgtt->vm); + mutex_unlock(&ctx->mutex); + i915_vm_put(&ppgtt->vm); } return ctx; -err_engines: - free_engines(rcu_access_pointer(ctx->engines)); err_free: kfree(ctx); return NULL; @@ -73,7 +70,7 @@ void mock_context_close(struct i915_gem_context *ctx) void mock_init_contexts(struct drm_i915_private *i915) { - init_contexts(i915); + init_contexts(&i915->gem.contexts); } struct i915_gem_context * @@ -82,8 +79,6 @@ live_context(struct drm_i915_private *i915, struct drm_file *file) struct i915_gem_context *ctx; int err; - lockdep_assert_held(&i915->drm.struct_mutex); - ctx = i915_gem_create_context(i915, 0); if (IS_ERR(ctx)) return ctx; diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index 09c68dda2098..55317081d48b 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -120,7 +120,6 @@ __dma_fence_signal__notify(struct dma_fence *fence, struct dma_fence_cb *cur, *tmp; lockdep_assert_held(fence->lock); - lockdep_assert_irqs_disabled(); list_for_each_entry_safe(cur, tmp, list, node) { INIT_LIST_HEAD(&cur->node); @@ -134,9 +133,10 @@ void intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) const ktime_t timestamp = ktime_get(); struct intel_context *ce, *cn; struct list_head *pos, *next; + unsigned long flags; LIST_HEAD(signal); - spin_lock(&b->irq_lock); + spin_lock_irqsave(&b->irq_lock, flags); if (b->irq_armed && list_empty(&b->signalers)) __intel_breadcrumbs_disarm_irq(b); @@ -182,30 +182,23 @@ void 
intel_engine_breadcrumbs_irq(struct intel_engine_cs *engine) } } - spin_unlock(&b->irq_lock); + spin_unlock_irqrestore(&b->irq_lock, flags); list_for_each_safe(pos, next, &signal) { struct i915_request *rq = list_entry(pos, typeof(*rq), signal_link); struct list_head cb_list; - spin_lock(&rq->lock); + spin_lock_irqsave(&rq->lock, flags); list_replace(&rq->fence.cb_list, &cb_list); __dma_fence_signal__timestamp(&rq->fence, timestamp); __dma_fence_signal__notify(&rq->fence, &cb_list); - spin_unlock(&rq->lock); + spin_unlock_irqrestore(&rq->lock, flags); i915_request_put(rq); } } -void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine) -{ - local_irq_disable(); - intel_engine_breadcrumbs_irq(engine); - local_irq_enable(); -} - static void signal_irq_work(struct irq_work *work) { struct intel_engine_cs *engine = @@ -275,7 +268,6 @@ void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine) bool i915_request_enable_breadcrumb(struct i915_request *rq) { lockdep_assert_held(&rq->lock); - lockdep_assert_irqs_disabled(); if (test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) { struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; @@ -325,7 +317,6 @@ void i915_request_cancel_breadcrumb(struct i915_request *rq) struct intel_breadcrumbs *b = &rq->engine->breadcrumbs; lockdep_assert_held(&rq->lock); - lockdep_assert_irqs_disabled(); /* * We must wait for b->irq_lock so that we know the interrupt handler diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c index f55691d151ae..ee9d2bcd2c13 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.c +++ b/drivers/gpu/drm/i915/gt/intel_context.c @@ -13,6 +13,7 @@ #include "intel_context.h" #include "intel_engine.h" #include "intel_engine_pm.h" +#include "intel_ring.h" static struct i915_global_context { struct i915_global base; @@ -62,7 +63,7 @@ int __intel_context_do_pin(struct intel_context *ce) } err = 0; - with_intel_runtime_pm(&ce->engine->i915->runtime_pm, wakeref) + with_intel_runtime_pm(ce->engine->uncore->rpm, wakeref) err = ce->ops->pin(ce); if (err) goto err; @@ -134,10 +135,11 @@ static int __context_pin_state(struct i915_vma *vma) static void __context_unpin_state(struct i915_vma *vma) { - __i915_vma_unpin(vma); i915_vma_make_shrinkable(vma); + __i915_vma_unpin(vma); } +__i915_active_call static void __intel_context_retire(struct i915_active *active) { struct intel_context *ce = container_of(active, typeof(*ce), active); @@ -150,6 +152,7 @@ static void __intel_context_retire(struct i915_active *active) intel_timeline_unpin(ce->timeline); intel_ring_unpin(ce->ring); + intel_context_put(ce); } @@ -219,12 +222,20 @@ intel_context_init(struct intel_context *ce, struct i915_gem_context *ctx, struct intel_engine_cs *engine) { + struct i915_address_space *vm; + GEM_BUG_ON(!engine->cops); kref_init(&ce->ref); ce->gem_context = ctx; - ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm); + rcu_read_lock(); + vm = rcu_dereference(ctx->vm); + if (vm) + ce->vm = i915_vm_get(vm); + else + ce->vm = i915_vm_get(&engine->gt->ggtt->vm); + rcu_read_unlock(); if (ctx->timeline) ce->timeline = intel_timeline_get(ctx->timeline); @@ -238,7 +249,7 @@ intel_context_init(struct intel_context *ce, mutex_init(&ce->pin_mutex); - i915_active_init(ctx->i915, &ce->active, + i915_active_init(&ce->active, __intel_context_active, __intel_context_retire); } @@ -298,14 +309,14 @@ int intel_context_prepare_remote_request(struct intel_context *ce, /* Only suitable for use in remotely modifying this context */ 
GEM_BUG_ON(rq->hw_context == ce); - if (rq->timeline != tl) { /* beware timeline sharing */ + if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */ err = mutex_lock_interruptible_nested(&tl->mutex, SINGLE_DEPTH_NESTING); if (err) return err; /* Queue this switch after current activity by this context. */ - err = i915_active_request_set(&tl->last_request, rq); + err = i915_active_fence_set(&tl->last_request, rq); mutex_unlock(&tl->mutex); if (err) return err; @@ -319,7 +330,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce, * words transfer the pinned ce object to tracked active request. */ GEM_BUG_ON(i915_active_is_idle(&ce->active)); - return i915_active_ref(&ce->active, rq->timeline, rq); + return i915_active_add_request(&ce->active, rq); } struct i915_request *intel_context_create_request(struct intel_context *ce) diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h index dd742ac2fbdb..68b3d317d959 100644 --- a/drivers/gpu/drm/i915/gt/intel_context.h +++ b/drivers/gpu/drm/i915/gt/intel_context.h @@ -12,6 +12,7 @@ #include "i915_active.h" #include "intel_context_types.h" #include "intel_engine_types.h" +#include "intel_ring_types.h" #include "intel_timeline_types.h" void intel_context_init(struct intel_context *ce, diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h index bf9cedfccbf0..6959b05ae5f8 100644 --- a/drivers/gpu/drm/i915/gt/intel_context_types.h +++ b/drivers/gpu/drm/i915/gt/intel_context_types.h @@ -58,6 +58,7 @@ struct intel_context { u32 *lrc_reg_state; u64 lrc_desc; + u32 tag; /* cookie passed to HW to track this context on submission */ unsigned int active_count; /* protected by timeline->mutex */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h index 22aab8593abf..bc3b72bfa9e3 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine.h +++ b/drivers/gpu/drm/i915/gt/intel_engine.h @@ -19,6 +19,7 @@ #include "intel_workarounds.h" struct drm_printer; +struct intel_gt; /* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, * but keeps the logic simple. Indeed, the whole purpose of this macro is just @@ -89,38 +90,6 @@ struct drm_printer; /* seqno size is actually only a uint32, but since we plan to use MI_FLUSH_DW to * do the writes, and that must have qw aligned offsets, simply pretend it's 8b. 
*/ -enum intel_engine_hangcheck_action { - ENGINE_IDLE = 0, - ENGINE_WAIT, - ENGINE_ACTIVE_SEQNO, - ENGINE_ACTIVE_HEAD, - ENGINE_ACTIVE_SUBUNITS, - ENGINE_WAIT_KICK, - ENGINE_DEAD, -}; - -static inline const char * -hangcheck_action_to_str(const enum intel_engine_hangcheck_action a) -{ - switch (a) { - case ENGINE_IDLE: - return "idle"; - case ENGINE_WAIT: - return "wait"; - case ENGINE_ACTIVE_SEQNO: - return "active seqno"; - case ENGINE_ACTIVE_HEAD: - return "active head"; - case ENGINE_ACTIVE_SUBUNITS: - return "active subunits"; - case ENGINE_WAIT_KICK: - return "wait kick"; - case ENGINE_DEAD: - return "dead"; - } - - return "unknown"; -} static inline unsigned int execlists_num_ports(const struct intel_engine_execlists * const execlists) @@ -206,126 +175,13 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value) #define I915_HWS_CSB_WRITE_INDEX 0x1f #define CNL_HWS_CSB_WRITE_INDEX 0x2f -struct intel_ring * -intel_engine_create_ring(struct intel_engine_cs *engine, int size); -int intel_ring_pin(struct intel_ring *ring); -void intel_ring_reset(struct intel_ring *ring, u32 tail); -unsigned int intel_ring_update_space(struct intel_ring *ring); -void intel_ring_unpin(struct intel_ring *ring); -void intel_ring_free(struct kref *ref); - -static inline struct intel_ring *intel_ring_get(struct intel_ring *ring) -{ - kref_get(&ring->ref); - return ring; -} - -static inline void intel_ring_put(struct intel_ring *ring) -{ - kref_put(&ring->ref, intel_ring_free); -} - void intel_engine_stop(struct intel_engine_cs *engine); void intel_engine_cleanup(struct intel_engine_cs *engine); -int __must_check intel_ring_cacheline_align(struct i915_request *rq); - -u32 __must_check *intel_ring_begin(struct i915_request *rq, unsigned int n); - -static inline void intel_ring_advance(struct i915_request *rq, u32 *cs) -{ - /* Dummy function. - * - * This serves as a placeholder in the code so that the reader - * can compare against the preceding intel_ring_begin() and - * check that the number of dwords emitted matches the space - * reserved for the command packet (i.e. the value passed to - * intel_ring_begin()). - */ - GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs); -} - -static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos) -{ - return pos & (ring->size - 1); -} - -static inline bool -intel_ring_offset_valid(const struct intel_ring *ring, - unsigned int pos) -{ - if (pos & -ring->size) /* must be strictly within the ring */ - return false; - - if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */ - return false; - - return true; -} - -static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr) -{ - /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */ - u32 offset = addr - rq->ring->vaddr; - GEM_BUG_ON(offset > rq->ring->size); - return intel_ring_wrap(rq->ring, offset); -} - -static inline void -assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) -{ - GEM_BUG_ON(!intel_ring_offset_valid(ring, tail)); - - /* - * "Ring Buffer Use" - * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 - * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5 - * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5 - * "If the Ring Buffer Head Pointer and the Tail Pointer are on the - * same cacheline, the Head Pointer must not be greater than the Tail - * Pointer." 
- * - * We use ring->head as the last known location of the actual RING_HEAD, - * it may have advanced but in the worst case it is equally the same - * as ring->head and so we should never program RING_TAIL to advance - * into the same cacheline as ring->head. - */ -#define cacheline(a) round_down(a, CACHELINE_BYTES) - GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) && - tail < ring->head); -#undef cacheline -} - -static inline unsigned int -intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) -{ - /* Whilst writes to the tail are strictly order, there is no - * serialisation between readers and the writers. The tail may be - * read by i915_request_retire() just as it is being updated - * by execlists, as although the breadcrumb is complete, the context - * switch hasn't been seen. - */ - assert_ring_tail_valid(ring, tail); - ring->tail = tail; - return tail; -} - -static inline unsigned int -__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size) -{ - /* - * "If the Ring Buffer Head Pointer and the Tail Pointer are on the - * same cacheline, the Head Pointer must not be greater than the Tail - * Pointer." - */ - GEM_BUG_ON(!is_power_of_2(size)); - return (head - tail - CACHELINE_BYTES) & (size - 1); -} - -int intel_engines_init_mmio(struct drm_i915_private *i915); -int intel_engines_setup(struct drm_i915_private *i915); -int intel_engines_init(struct drm_i915_private *i915); -void intel_engines_cleanup(struct drm_i915_private *i915); +int intel_engines_init_mmio(struct intel_gt *gt); +int intel_engines_setup(struct intel_gt *gt); +int intel_engines_init(struct intel_gt *gt); +void intel_engines_cleanup(struct intel_gt *gt); int intel_engine_init_common(struct intel_engine_cs *engine); void intel_engine_cleanup_common(struct intel_engine_cs *engine); @@ -349,7 +205,6 @@ void intel_engine_init_execlists(struct intel_engine_cs *engine); void intel_engine_init_breadcrumbs(struct intel_engine_cs *engine); void intel_engine_fini_breadcrumbs(struct intel_engine_cs *engine); -void intel_engine_signal_breadcrumbs(struct intel_engine_cs *engine); void intel_engine_disarm_breadcrumbs(struct intel_engine_cs *engine); static inline void @@ -422,8 +277,9 @@ static inline void __intel_engine_reset(struct intel_engine_cs *engine, engine->serial++; /* contexts lost */ } -bool intel_engine_is_idle(struct intel_engine_cs *engine); bool intel_engines_are_idle(struct intel_gt *gt); +bool intel_engine_is_idle(struct intel_engine_cs *engine); +void intel_engine_flush_submission(struct intel_engine_cs *engine); void intel_engines_reset_default_submission(struct intel_gt *gt); @@ -434,61 +290,6 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m, const char *header, ...); -static inline void intel_engine_context_in(struct intel_engine_cs *engine) -{ - unsigned long flags; - - if (READ_ONCE(engine->stats.enabled) == 0) - return; - - write_seqlock_irqsave(&engine->stats.lock, flags); - - if (engine->stats.enabled > 0) { - if (engine->stats.active++ == 0) - engine->stats.start = ktime_get(); - GEM_BUG_ON(engine->stats.active == 0); - } - - write_sequnlock_irqrestore(&engine->stats.lock, flags); -} - -static inline void intel_engine_context_out(struct intel_engine_cs *engine) -{ - unsigned long flags; - - if (READ_ONCE(engine->stats.enabled) == 0) - return; - - write_seqlock_irqsave(&engine->stats.lock, flags); - - if (engine->stats.enabled > 0) { - ktime_t last; - - if (engine->stats.active && --engine->stats.active == 0) { - /* - * Decrement the active 
context count and in case GPU - * is now idle add up to the running total. - */ - last = ktime_sub(ktime_get(), engine->stats.start); - - engine->stats.total = ktime_add(engine->stats.total, - last); - } else if (engine->stats.active == 0) { - /* - * After turning on engine stats, context out might be - * the first event in which case we account from the - * time stats gathering was turned on. - */ - last = ktime_sub(ktime_get(), engine->stats.enabled_at); - - engine->stats.total = ktime_add(engine->stats.total, - last); - } - } - - write_sequnlock_irqrestore(&engine->stats.lock, flags); -} - int intel_enable_engine_stats(struct intel_engine_cs *engine); void intel_disable_engine_stats(struct intel_engine_cs *engine); @@ -525,4 +326,22 @@ void intel_engine_init_active(struct intel_engine_cs *engine, #define ENGINE_MOCK 1 #define ENGINE_VIRTUAL 2 +static inline bool +intel_engine_has_preempt_reset(const struct intel_engine_cs *engine) +{ + if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + return false; + + return intel_engine_has_preemption(engine); +} + +static inline bool +intel_engine_has_timeslices(const struct intel_engine_cs *engine) +{ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return false; + + return intel_engine_has_semaphores(engine); +} + #endif /* _INTEL_RINGBUFFER_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 4ce8626b140e..5ca3ec911e50 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -37,6 +37,7 @@ #include "intel_context.h" #include "intel_lrc.h" #include "intel_reset.h" +#include "intel_ring.h" /* Haswell does have the CXT_SIZE register however it does not appear to be * valid. Now, docs explain in dwords what is in the context object. The full @@ -277,6 +278,9 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH)); BUILD_BUG_ON(MAX_ENGINE_INSTANCE >= BIT(GEN11_ENGINE_INSTANCE_WIDTH)); + if (GEM_DEBUG_WARN_ON(id >= ARRAY_SIZE(gt->engine))) + return -EINVAL; + if (GEM_DEBUG_WARN_ON(info->class > MAX_ENGINE_CLASS)) return -EINVAL; @@ -293,6 +297,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) BUILD_BUG_ON(BITS_PER_TYPE(engine->mask) < I915_NUM_ENGINES); engine->id = id; + engine->legacy_idx = INVALID_ENGINE; engine->mask = BIT(id); engine->i915 = gt->i915; engine->gt = gt; @@ -304,6 +309,15 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) engine->instance = info->instance; __sprint_engine_name(engine); + engine->props.heartbeat_interval_ms = + CONFIG_DRM_I915_HEARTBEAT_INTERVAL; + engine->props.preempt_timeout_ms = + CONFIG_DRM_I915_PREEMPT_TIMEOUT; + engine->props.stop_timeout_ms = + CONFIG_DRM_I915_STOP_TIMEOUT; + engine->props.timeslice_duration_ms = + CONFIG_DRM_I915_TIMESLICE_DURATION; + /* * To be overridden by the backend on setup. However to facilitate * cleanup on error during setup, we always provide the destroy vfunc. 
@@ -328,6 +342,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id) intel_engine_sanitize_mmio(engine); gt->engine_class[info->class][info->instance] = engine; + gt->engine[id] = engine; intel_engine_add_user(engine); gt->i915->engine[id] = engine; @@ -365,38 +380,40 @@ static void __setup_engine_capabilities(struct intel_engine_cs *engine) } } -static void intel_setup_engine_capabilities(struct drm_i915_private *i915) +static void intel_setup_engine_capabilities(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) + for_each_engine(engine, gt, id) __setup_engine_capabilities(engine); } /** * intel_engines_cleanup() - free the resources allocated for Command Streamers - * @i915: the i915 devic + * @gt: pointer to struct intel_gt */ -void intel_engines_cleanup(struct drm_i915_private *i915) +void intel_engines_cleanup(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { engine->destroy(engine); - i915->engine[id] = NULL; + gt->engine[id] = NULL; + gt->i915->engine[id] = NULL; } } /** * intel_engines_init_mmio() - allocate and prepare the Engine Command Streamers - * @i915: the i915 device + * @gt: pointer to struct intel_gt * * Return: non-zero if the initialization failed. */ -int intel_engines_init_mmio(struct drm_i915_private *i915) +int intel_engines_init_mmio(struct intel_gt *gt) { + struct drm_i915_private *i915 = gt->i915; struct intel_device_info *device_info = mkwrite_device_info(i915); const unsigned int engine_mask = INTEL_INFO(i915)->engine_mask; unsigned int mask = 0; @@ -414,7 +431,7 @@ int intel_engines_init_mmio(struct drm_i915_private *i915) if (!HAS_ENGINE(i915, i)) continue; - err = intel_engine_setup(&i915->gt, i); + err = intel_engine_setup(gt, i); if (err) goto cleanup; @@ -431,36 +448,36 @@ int intel_engines_init_mmio(struct drm_i915_private *i915) RUNTIME_INFO(i915)->num_engines = hweight32(mask); - intel_gt_check_and_clear_faults(&i915->gt); + intel_gt_check_and_clear_faults(gt); - intel_setup_engine_capabilities(i915); + intel_setup_engine_capabilities(gt); return 0; cleanup: - intel_engines_cleanup(i915); + intel_engines_cleanup(gt); return err; } /** * intel_engines_init() - init the Engine Command Streamers - * @i915: i915 device private + * @gt: pointer to struct intel_gt * * Return: non-zero if the initialization failed. */ -int intel_engines_init(struct drm_i915_private *i915) +int intel_engines_init(struct intel_gt *gt) { int (*init)(struct intel_engine_cs *engine); struct intel_engine_cs *engine; enum intel_engine_id id; int err; - if (HAS_EXECLISTS(i915)) + if (HAS_EXECLISTS(gt->i915)) init = intel_execlists_submission_init; else init = intel_ring_submission_init; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { err = init(engine); if (err) goto cleanup; @@ -469,7 +486,7 @@ int intel_engines_init(struct drm_i915_private *i915) return 0; cleanup: - intel_engines_cleanup(i915); + intel_engines_cleanup(gt); return err; } @@ -513,7 +530,7 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine, unsigned int flags; flags = PIN_GLOBAL; - if (!HAS_LLC(engine->i915)) + if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt)) /* * On g33, we cannot place HWS above 256MiB, so * restrict its pinning to the low mappable arena. 
@@ -597,7 +614,6 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine) intel_engine_init_active(engine, ENGINE_PHYSICAL); intel_engine_init_breadcrumbs(engine); intel_engine_init_execlists(engine); - intel_engine_init_hangcheck(engine); intel_engine_init_cmd_parser(engine); intel_engine_init__pm(engine); @@ -616,26 +632,26 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine) /** * intel_engines_setup- setup engine state not requiring hw access - * @i915: Device to setup. + * @gt: pointer to struct intel_gt * * Initializes engine structure members shared between legacy and execlists * submission modes which do not require hardware access. * * Typically done early in the submission mode specific engine setup stage. */ -int intel_engines_setup(struct drm_i915_private *i915) +int intel_engines_setup(struct intel_gt *gt) { int (*setup)(struct intel_engine_cs *engine); struct intel_engine_cs *engine; enum intel_engine_id id; int err; - if (HAS_EXECLISTS(i915)) + if (HAS_EXECLISTS(gt->i915)) setup = intel_execlists_submission_setup; else setup = intel_ring_submission_setup; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { err = intel_engine_setup_common(engine); if (err) goto cleanup; @@ -653,7 +669,7 @@ int intel_engines_setup(struct drm_i915_private *i915) return 0; cleanup: - intel_engines_cleanup(i915); + intel_engines_cleanup(gt); return err; } @@ -680,6 +696,8 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) engine->status_page.vma)) goto out_frame; + mutex_lock(&frame->timeline.mutex); + frame->ring.vaddr = frame->cs; frame->ring.size = sizeof(frame->cs); frame->ring.effective_size = frame->ring.size; @@ -688,18 +706,22 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine) frame->rq.i915 = engine->i915; frame->rq.engine = engine; frame->rq.ring = &frame->ring; - frame->rq.timeline = &frame->timeline; + rcu_assign_pointer(frame->rq.timeline, &frame->timeline); dw = intel_timeline_pin(&frame->timeline); if (dw < 0) goto out_timeline; + spin_lock_irq(&engine->active.lock); dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs; + spin_unlock_irq(&engine->active.lock); + GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */ intel_timeline_unpin(&frame->timeline); out_timeline: + mutex_unlock(&frame->timeline.mutex); intel_timeline_fini(&frame->timeline); out_frame: kfree(frame); @@ -730,6 +752,7 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass) static struct intel_context * create_kernel_context(struct intel_engine_cs *engine) { + static struct lock_class_key kernel; struct intel_context *ce; int err; @@ -745,6 +768,14 @@ create_kernel_context(struct intel_engine_cs *engine) return ERR_PTR(err); } + /* + * Give our perma-pinned kernel timelines a separate lockdep class, + * so that we can use them from within the normal user timelines + * should we need to inject GPU operations during their request + * construction. 
+ */ + lockdep_set_class(&ce->timeline->mutex, &kernel); + return ce; } @@ -814,8 +845,10 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine) if (engine->default_state) i915_gem_object_put(engine->default_state); - intel_context_unpin(engine->kernel_context); - intel_context_put(engine->kernel_context); + if (engine->kernel_context) { + intel_context_unpin(engine->kernel_context); + intel_context_put(engine->kernel_context); + } GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); intel_wa_list_free(&engine->ctx_wa_list); @@ -851,6 +884,21 @@ u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine) return bbaddr; } +static unsigned long stop_timeout(const struct intel_engine_cs *engine) +{ + if (in_atomic() || irqs_disabled()) /* inside atomic preempt-reset? */ + return 0; + + /* + * If we are doing a normal GPU reset, we can take our time and allow + * the engine to quiesce. We've stopped submission to the engine, and + * if we wait long enough an innocent context should complete and + * leave the engine idle. So they should not be caught unaware by + * the forthcoming GPU reset (which usually follows the stop_cs)! + */ + return READ_ONCE(engine->props.stop_timeout_ms); +} + int intel_engine_stop_cs(struct intel_engine_cs *engine) { struct intel_uncore *uncore = engine->uncore; @@ -868,7 +916,7 @@ int intel_engine_stop_cs(struct intel_engine_cs *engine) err = 0; if (__intel_wait_for_register_fw(uncore, mode, MODE_IDLE, MODE_IDLE, - 1000, 0, + 1000, stop_timeout(engine), NULL)) { GEM_TRACE("%s: timed out on STOP_RING -> IDLE\n", engine->name); err = -ETIMEDOUT; @@ -948,6 +996,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, struct intel_instdone *instdone) { struct drm_i915_private *i915 = engine->i915; + const struct sseu_dev_info *sseu = &RUNTIME_INFO(i915)->sseu; struct intel_uncore *uncore = engine->uncore; u32 mmio_base = engine->mmio_base; int slice; @@ -965,7 +1014,7 @@ void intel_engine_get_instdone(struct intel_engine_cs *engine, instdone->slice_common = intel_uncore_read(uncore, GEN7_SC_INSTDONE); - for_each_instdone_slice_subslice(i915, slice, subslice) { + for_each_instdone_slice_subslice(i915, sseu, slice, subslice) { instdone->sampler[slice][subslice] = read_subslice_reg(engine, slice, subslice, GEN7_SAMPLER_INSTDONE); @@ -1031,6 +1080,25 @@ static bool ring_is_idle(struct intel_engine_cs *engine) return idle; } +void intel_engine_flush_submission(struct intel_engine_cs *engine) +{ + struct tasklet_struct *t = &engine->execlists.tasklet; + + if (__tasklet_is_scheduled(t)) { + local_bh_disable(); + if (tasklet_trylock(t)) { + /* Must wait for any GPU reset in progress. */ + if (__tasklet_is_enabled(t)) + t->func(t->data); + tasklet_unlock(t); + } + local_bh_enable(); + } + + /* Otherwise flush the tasklet if it was running on another cpu */ + tasklet_unlock_wait(t); +} + /** * intel_engine_is_idle() - Report if the engine has finished process all work * @engine: the intel_engine_cs @@ -1049,21 +1117,9 @@ bool intel_engine_is_idle(struct intel_engine_cs *engine) /* Waiting to drain ELSP? */ if (execlists_active(&engine->execlists)) { - struct tasklet_struct *t = &engine->execlists.tasklet; - synchronize_hardirq(engine->i915->drm.pdev->irq); - local_bh_disable(); - if (tasklet_trylock(t)) { - /* Must wait for any GPU reset in progress. 
*/ - if (__tasklet_is_enabled(t)) - t->func(t->data); - tasklet_unlock(t); - } - local_bh_enable(); - - /* Otherwise flush the tasklet if it was on another cpu */ - tasklet_unlock_wait(t); + intel_engine_flush_submission(engine); if (execlists_active(&engine->execlists)) return false; @@ -1093,7 +1149,7 @@ bool intel_engines_are_idle(struct intel_gt *gt) if (!READ_ONCE(gt->awake)) return true; - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { if (!intel_engine_is_idle(engine)) return false; } @@ -1106,7 +1162,7 @@ void intel_engines_reset_default_submission(struct intel_gt *gt) struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, gt->i915, id) + for_each_engine(engine, gt, id) engine->set_default_submission(engine); } @@ -1118,6 +1174,8 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine) case 3: /* maybe only uses physical not virtual addresses */ return !(IS_I915G(engine->i915) || IS_I915GM(engine->i915)); + case 4: + return !IS_I965G(engine->i915); /* who knows! */ case 6: return engine->class != VIDEO_DECODE_CLASS; /* b0rked */ default: @@ -1193,6 +1251,38 @@ static void hexdump(struct drm_printer *m, const void *buf, size_t len) } } +static struct intel_timeline *get_timeline(struct i915_request *rq) +{ + struct intel_timeline *tl; + + /* + * Even though we are holding the engine->active.lock here, there + * is no control over the submission queue per-se and we are + * inspecting the active state at a random point in time, with an + * unknown queue. Play safe and make sure the timeline remains valid. + * (Only being used for pretty printing, one extra kref shouldn't + * cause a camel stampede!) + */ + rcu_read_lock(); + tl = rcu_dereference(rq->timeline); + if (!kref_get_unless_zero(&tl->kref)) + tl = NULL; + rcu_read_unlock(); + + return tl; +} + +static const char *repr_timer(const struct timer_list *t) +{ + if (!READ_ONCE(t->expires)) + return "inactive"; + + if (timer_pending(t)) + return "active"; + + return "expired"; +} + static void intel_engine_print_registers(struct intel_engine_cs *engine, struct drm_printer *m) { @@ -1254,19 +1344,21 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, unsigned int idx; u8 read, write; - drm_printf(m, "\tExeclist status: 0x%08x %08x, entries %u\n", - ENGINE_READ(engine, RING_EXECLIST_STATUS_LO), - ENGINE_READ(engine, RING_EXECLIST_STATUS_HI), - num_entries); + drm_printf(m, "\tExeclist tasklet queued? %s (%s), preempt? %s, timeslice? %s\n", + yesno(test_bit(TASKLET_STATE_SCHED, + &engine->execlists.tasklet.state)), + enableddisabled(!atomic_read(&engine->execlists.tasklet.count)), + repr_timer(&engine->execlists.preempt), + repr_timer(&engine->execlists.timer)); read = execlists->csb_head; write = READ_ONCE(*execlists->csb_write); - drm_printf(m, "\tExeclist CSB read %d, write %d, tasklet queued? 
%s (%s)\n", - read, write, - yesno(test_bit(TASKLET_STATE_SCHED, - &engine->execlists.tasklet.state)), - enableddisabled(!atomic_read(&engine->execlists.tasklet.count))); + drm_printf(m, "\tExeclist status: 0x%08x %08x; CSB read:%d, write:%d, entries:%d\n", + ENGINE_READ(engine, RING_EXECLIST_STATUS_LO), + ENGINE_READ(engine, RING_EXECLIST_STATUS_HI), + read, write, num_entries); + if (read >= num_entries) read = 0; if (write >= num_entries) @@ -1280,33 +1372,45 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine, } execlists_active_lock_bh(execlists); + rcu_read_lock(); for (port = execlists->active; (rq = *port); port++) { char hdr[80]; int len; len = snprintf(hdr, sizeof(hdr), - "\t\tActive[%d: ", + "\t\tActive[%d]: ", (int)(port - execlists->active)); - if (!i915_request_signaled(rq)) + if (!i915_request_signaled(rq)) { + struct intel_timeline *tl = get_timeline(rq); + len += snprintf(hdr + len, sizeof(hdr) - len, "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ", i915_ggtt_offset(rq->ring->vma), - rq->timeline->hwsp_offset, + tl ? tl->hwsp_offset : 0, hwsp_seqno(rq)); + + if (tl) + intel_timeline_put(tl); + } snprintf(hdr + len, sizeof(hdr) - len, "rq: "); print_request(m, rq, hdr); } for (port = execlists->pending; (rq = *port); port++) { + struct intel_timeline *tl = get_timeline(rq); char hdr[80]; snprintf(hdr, sizeof(hdr), "\t\tPending[%d] ring:{start:%08x, hwsp:%08x, seqno:%08x}, rq: ", (int)(port - execlists->pending), i915_ggtt_offset(rq->ring->vma), - rq->timeline->hwsp_offset, + tl ? tl->hwsp_offset : 0, hwsp_seqno(rq)); print_request(m, rq, hdr); + + if (tl) + intel_timeline_put(tl); } + rcu_read_unlock(); execlists_active_unlock_bh(execlists); } else if (INTEL_GEN(dev_priv) > 6) { drm_printf(m, "\tPP_DIR_BASE: 0x%08x\n", @@ -1372,8 +1476,13 @@ void intel_engine_dump(struct intel_engine_cs *engine, drm_printf(m, "*** WEDGED ***\n"); drm_printf(m, "\tAwake? 
%d\n", atomic_read(&engine->wakeref.count)); - drm_printf(m, "\tHangcheck: %d ms ago\n", - jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp)); + + rcu_read_lock(); + rq = READ_ONCE(engine->heartbeat.systole); + if (rq) + drm_printf(m, "\tHeartbeat: %d ms ago\n", + jiffies_to_msecs(jiffies - rq->emitted_jiffies)); + rcu_read_unlock(); drm_printf(m, "\tReset count: %d (global %d)\n", i915_reset_engine_count(error, engine), i915_reset_count(error)); @@ -1383,6 +1492,8 @@ void intel_engine_dump(struct intel_engine_cs *engine, spin_lock_irqsave(&engine->active.lock, flags); rq = intel_engine_find_active_request(engine); if (rq) { + struct intel_timeline *tl = get_timeline(rq); + print_request(m, rq, "\t\tactive "); drm_printf(m, "\t\tring->start: 0x%08x\n", @@ -1395,18 +1506,27 @@ void intel_engine_dump(struct intel_engine_cs *engine, rq->ring->emit); drm_printf(m, "\t\tring->space: 0x%08x\n", rq->ring->space); - drm_printf(m, "\t\tring->hwsp: 0x%08x\n", - rq->timeline->hwsp_offset); + + if (tl) { + drm_printf(m, "\t\tring->hwsp: 0x%08x\n", + tl->hwsp_offset); + intel_timeline_put(tl); + } print_request_ring(m, rq); + + if (rq->hw_context->lrc_reg_state) { + drm_printf(m, "Logical Ring Context:\n"); + hexdump(m, rq->hw_context->lrc_reg_state, PAGE_SIZE); + } } spin_unlock_irqrestore(&engine->active.lock, flags); drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base); - wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm); + wakeref = intel_runtime_pm_get_if_in_use(engine->uncore->rpm); if (wakeref) { intel_engine_print_registers(engine, m); - intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref); + intel_runtime_pm_put(engine->uncore->rpm, wakeref); } else { drm_printf(m, "\tDevice is asleep; skipping register dump\n"); } diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c new file mode 100644 index 000000000000..06aa14c7aa8c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c @@ -0,0 +1,234 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_request.h" + +#include "intel_context.h" +#include "intel_engine_heartbeat.h" +#include "intel_engine_pm.h" +#include "intel_engine.h" +#include "intel_gt.h" +#include "intel_reset.h" + +/* + * While the engine is active, we send a periodic pulse along the engine + * to check on its health and to flush any idle-barriers. If that request + * is stuck, and we fail to preempt it, we declare the engine hung and + * issue a reset -- in the hope that restores progress. 
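The heartbeat() worker added below implements the policy sketched in this comment: emit a pulse on the kernel context at minimum priority, and if the previous pulse is still outstanding at the next interval, escalate its priority step by step; only once the final escalation has also failed is the engine declared hung and reset. A condensed model of that decision, with stand-in priority levels and no i915 types, just to make the escalation explicit:

#include <linux/types.h>

enum pulse_prio { PULSE_MIN, PULSE_NORMAL, PULSE_HEARTBEAT, PULSE_BARRIER };

struct heartbeat_model {
	bool outstanding;	/* previous pulse not yet completed */
	enum pulse_prio prio;	/* priority of the outstanding pulse */
};

/* Run once per heartbeat interval; returns true when a hang is declared. */
static bool heartbeat_tick(struct heartbeat_model *hb,
			   bool pulse_completed, bool can_preempt)
{
	if (!hb->outstanding || pulse_completed) {
		hb->outstanding = true;	/* emit a fresh, low-priority pulse */
		hb->prio = PULSE_MIN;
		return false;
	}

	/* Without preemption, or after the final escalation, give up. */
	if (!can_preempt || hb->prio == PULSE_BARRIER)
		return true;

	hb->prio++;	/* escalate and wait one more interval */
	return false;
}

In the driver code below the escalation ends at I915_PRIORITY_BARRIER, and a pulse that stalls even there leads to intel_gt_handle_error() on the engine's mask.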
+ */ + +static bool next_heartbeat(struct intel_engine_cs *engine) +{ + long delay; + + delay = READ_ONCE(engine->props.heartbeat_interval_ms); + if (!delay) + return false; + + delay = msecs_to_jiffies_timeout(delay); + if (delay >= HZ) + delay = round_jiffies_up_relative(delay); + schedule_delayed_work(&engine->heartbeat.work, delay); + + return true; +} + +static void idle_pulse(struct intel_engine_cs *engine, struct i915_request *rq) +{ + engine->wakeref_serial = READ_ONCE(engine->serial) + 1; + i915_request_add_active_barriers(rq); +} + +static void show_heartbeat(const struct i915_request *rq, + struct intel_engine_cs *engine) +{ + struct drm_printer p = drm_debug_printer("heartbeat"); + + intel_engine_dump(engine, &p, + "%s heartbeat {prio:%d} not ticking\n", + engine->name, + rq->sched.attr.priority); +} + +static void heartbeat(struct work_struct *wrk) +{ + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MIN), + }; + struct intel_engine_cs *engine = + container_of(wrk, typeof(*engine), heartbeat.work.work); + struct intel_context *ce = engine->kernel_context; + struct i915_request *rq; + + if (!intel_engine_pm_get_if_awake(engine)) + return; + + rq = engine->heartbeat.systole; + if (rq && i915_request_completed(rq)) { + i915_request_put(rq); + engine->heartbeat.systole = NULL; + } + + if (intel_gt_is_wedged(engine->gt)) + goto out; + + if (engine->heartbeat.systole) { + if (engine->schedule && + rq->sched.attr.priority < I915_PRIORITY_BARRIER) { + /* + * Gradually raise the priority of the heartbeat to + * give high priority work [which presumably desires + * low latency and no jitter] the chance to naturally + * complete before being preempted. + */ + attr.priority = I915_PRIORITY_MASK; + if (rq->sched.attr.priority >= attr.priority) + attr.priority |= I915_USER_PRIORITY(I915_PRIORITY_HEARTBEAT); + if (rq->sched.attr.priority >= attr.priority) + attr.priority = I915_PRIORITY_BARRIER; + + local_bh_disable(); + engine->schedule(rq, &attr); + local_bh_enable(); + } else { + if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) + show_heartbeat(rq, engine); + + intel_gt_handle_error(engine->gt, engine->mask, + I915_ERROR_CAPTURE, + "stopped heartbeat on %s", + engine->name); + } + goto out; + } + + if (engine->wakeref_serial == engine->serial) + goto out; + + mutex_lock(&ce->timeline->mutex); + + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); + intel_context_exit(ce); + if (IS_ERR(rq)) + goto unlock; + + idle_pulse(engine, rq); + if (i915_modparams.enable_hangcheck) + engine->heartbeat.systole = i915_request_get(rq); + + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + +unlock: + mutex_unlock(&ce->timeline->mutex); +out: + if (!next_heartbeat(engine)) + i915_request_put(fetch_and_zero(&engine->heartbeat.systole)); + intel_engine_pm_put(engine); +} + +void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine) +{ + if (!IS_ACTIVE(CONFIG_DRM_I915_HEARTBEAT_INTERVAL)) + return; + + next_heartbeat(engine); +} + +void intel_engine_park_heartbeat(struct intel_engine_cs *engine) +{ + if (cancel_delayed_work(&engine->heartbeat.work)) + i915_request_put(fetch_and_zero(&engine->heartbeat.systole)); +} + +void intel_engine_init_heartbeat(struct intel_engine_cs *engine) +{ + INIT_DELAYED_WORK(&engine->heartbeat.work, heartbeat); +} + +int intel_engine_set_heartbeat(struct intel_engine_cs *engine, + unsigned long delay) +{ + int err; + + /* Send one last pulse before to cleanup persistent hogs */ + if (!delay && 
IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) { + err = intel_engine_pulse(engine); + if (err) + return err; + } + + WRITE_ONCE(engine->props.heartbeat_interval_ms, delay); + + if (intel_engine_pm_get_if_awake(engine)) { + if (delay) + intel_engine_unpark_heartbeat(engine); + else + intel_engine_park_heartbeat(engine); + intel_engine_pm_put(engine); + } + + return 0; +} + +int intel_engine_pulse(struct intel_engine_cs *engine) +{ + struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER }; + struct intel_context *ce = engine->kernel_context; + struct i915_request *rq; + int err = 0; + + if (!intel_engine_has_preemption(engine)) + return -ENODEV; + + if (!intel_engine_pm_get_if_awake(engine)) + return 0; + + if (mutex_lock_interruptible(&ce->timeline->mutex)) + goto out_rpm; + + intel_context_enter(ce); + rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN); + intel_context_exit(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_unlock; + } + + rq->flags |= I915_REQUEST_SENTINEL; + idle_pulse(engine, rq); + + __i915_request_commit(rq); + __i915_request_queue(rq, &attr); + +out_unlock: + mutex_unlock(&ce->timeline->mutex); +out_rpm: + intel_engine_pm_put(engine); + return err; +} + +int intel_engine_flush_barriers(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + if (llist_empty(&engine->barrier_tasks)) + return 0; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + idle_pulse(engine, rq); + i915_request_add(rq); + + return 0; +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_engine_heartbeat.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h new file mode 100644 index 000000000000..a7b8c0f9e005 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.h @@ -0,0 +1,23 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_ENGINE_HEARTBEAT_H +#define INTEL_ENGINE_HEARTBEAT_H + +struct intel_engine_cs; + +void intel_engine_init_heartbeat(struct intel_engine_cs *engine); + +int intel_engine_set_heartbeat(struct intel_engine_cs *engine, + unsigned long delay); + +void intel_engine_park_heartbeat(struct intel_engine_cs *engine); +void intel_engine_unpark_heartbeat(struct intel_engine_cs *engine); + +int intel_engine_pulse(struct intel_engine_cs *engine); +int intel_engine_flush_barriers(struct intel_engine_cs *engine); + +#endif /* INTEL_ENGINE_HEARTBEAT_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c index 7f647243b3b9..874d82677179 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c @@ -7,10 +7,13 @@ #include "i915_drv.h" #include "intel_engine.h" +#include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" #include "intel_engine_pool.h" #include "intel_gt.h" #include "intel_gt_pm.h" +#include "intel_rc6.h" +#include "intel_ring.h" static int __engine_unpark(struct intel_wakeref *wf) { @@ -33,7 +36,7 @@ static int __engine_unpark(struct intel_wakeref *wf) if (engine->unpark) engine->unpark(engine); - intel_engine_init_hangcheck(engine); + intel_engine_unpark_heartbeat(engine); return 0; } @@ -103,14 +106,14 @@ static bool switch_to_kernel_context(struct intel_engine_cs *engine) /* Context switch failed, hope for the best! Maybe reset? 
*/ goto out_unlock; - intel_timeline_enter(rq->timeline); + intel_timeline_enter(i915_request_timeline(rq)); /* Check again on the next retirement. */ engine->wakeref_serial = engine->serial + 1; i915_request_add_active_barriers(rq); /* Install ourselves as a preemption barrier */ - rq->sched.attr.priority = I915_PRIORITY_UNPREEMPTABLE; + rq->sched.attr.priority = I915_PRIORITY_BARRIER; __i915_request_commit(rq); /* Release our exclusive hold on the engine */ @@ -123,6 +126,19 @@ out_unlock: return result; } +static void call_idle_barriers(struct intel_engine_cs *engine) +{ + struct llist_node *node, *next; + + llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) { + struct dma_fence_cb *cb = + container_of((struct list_head *)node, + typeof(*cb), node); + + cb->func(NULL, cb); + } +} + static int __engine_park(struct intel_wakeref *wf) { struct intel_engine_cs *engine = @@ -142,6 +158,9 @@ static int __engine_park(struct intel_wakeref *wf) GEM_TRACE("%s\n", engine->name); + call_idle_barriers(engine); /* cleanup after wedging */ + + intel_engine_park_heartbeat(engine); intel_engine_disarm_breadcrumbs(engine); intel_engine_pool_park(&engine->pool); @@ -169,9 +188,10 @@ static const struct intel_wakeref_ops wf_ops = { void intel_engine_init__pm(struct intel_engine_cs *engine) { - struct intel_runtime_pm *rpm = &engine->i915->runtime_pm; + struct intel_runtime_pm *rpm = engine->uncore->rpm; intel_wakeref_init(&engine->wakeref, rpm, &wf_ops); + intel_engine_init_heartbeat(engine); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.c b/drivers/gpu/drm/i915/gt/intel_engine_pool.c index 379a91780bd4..397186818305 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pool.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.c @@ -61,6 +61,7 @@ static int pool_active(struct i915_active *ref) return 0; } +__i915_active_call static void pool_retire(struct i915_active *ref) { struct intel_engine_pool_node *node = @@ -94,7 +95,7 @@ node_create(struct intel_engine_pool *pool, size_t sz) return ERR_PTR(-ENOMEM); node->pool = pool; - i915_active_init(engine->i915, &node->active, pool_active, pool_retire); + i915_active_init(&node->active, pool_active, pool_retire); obj = i915_gem_object_create_internal(engine->i915, sz); if (IS_ERR(obj)) { @@ -109,9 +110,19 @@ node_create(struct intel_engine_pool *pool, size_t sz) return node; } +static struct intel_engine_pool *lookup_pool(struct intel_engine_cs *engine) +{ + if (intel_engine_is_virtual(engine)) + engine = intel_virtual_engine_get_sibling(engine, 0); + + GEM_BUG_ON(!engine); + return &engine->pool; +} + struct intel_engine_pool_node * -intel_engine_pool_get(struct intel_engine_pool *pool, size_t size) +intel_engine_get_pool(struct intel_engine_cs *engine, size_t size) { + struct intel_engine_pool *pool = lookup_pool(engine); struct intel_engine_pool_node *node; struct list_head *list; unsigned long flags; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pool.h b/drivers/gpu/drm/i915/gt/intel_engine_pool.h index 8d069efd9457..1bd89cadc3b7 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_pool.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_pool.h @@ -12,13 +12,13 @@ #include "i915_request.h" struct intel_engine_pool_node * -intel_engine_pool_get(struct intel_engine_pool *pool, size_t size); +intel_engine_get_pool(struct intel_engine_cs *engine, size_t size); static inline int intel_engine_pool_mark_active(struct intel_engine_pool_node *node, struct i915_request *rq) { - return 
i915_active_ref(&node->active, rq->timeline, rq); + return i915_active_add_request(&node->active, rq); } static inline void diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 9dd8c299cb2d..758f0e8ec672 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -15,6 +15,7 @@ #include <linux/rbtree.h> #include <linux/timer.h> #include <linux/types.h> +#include <linux/workqueue.h> #include "i915_gem.h" #include "i915_pmu.h" @@ -58,6 +59,7 @@ struct i915_gem_context; struct i915_request; struct i915_sched_attr; struct intel_gt; +struct intel_ring; struct intel_uncore; typedef u8 intel_engine_mask_t; @@ -76,40 +78,6 @@ struct intel_instdone { u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES]; }; -struct intel_engine_hangcheck { - u64 acthd; - u32 last_ring; - u32 last_head; - unsigned long action_timestamp; - struct intel_instdone instdone; -}; - -struct intel_ring { - struct kref ref; - struct i915_vma *vma; - void *vaddr; - - /* - * As we have two types of rings, one global to the engine used - * by ringbuffer submission and those that are exclusive to a - * context used by execlists, we have to play safe and allow - * atomic updates to the pin_count. However, the actual pinning - * of the context is either done during initialisation for - * ringbuffer submission or serialised as part of the context - * pinning for execlists, and so we do not need a mutex ourselves - * to serialise intel_ring_pin/intel_ring_unpin. - */ - atomic_t pin_count; - - u32 head; - u32 tail; - u32 emit; - - u32 space; - u32 size; - u32 effective_size; -}; - /* * we use a single page to load ctx workarounds so all of these * values are referred in terms of dwords @@ -148,6 +116,7 @@ enum intel_engine_id { VECS1, #define _VECS(n) (VECS0 + (n)) I915_NUM_ENGINES +#define INVALID_ENGINE ((enum intel_engine_id)-1) }; struct st_preempt_hang { @@ -174,6 +143,11 @@ struct intel_engine_execlists { struct timer_list timer; /** + * @preempt: reset the current context if it fails to give way + */ + struct timer_list preempt; + + /** * @default_priolist: priority list for I915_PRIORITY_NORMAL */ struct i915_priolist default_priolist; @@ -303,10 +277,12 @@ struct intel_engine_cs { u8 uabi_class; u8 uabi_instance; + u32 uabi_capabilities; u32 context_size; u32 mmio_base; - u32 uabi_capabilities; + unsigned int context_tag; +#define NUM_CONTEXT_TAG roundup_pow_of_two(2 * EXECLIST_MAX_PORTS) struct rb_node uabi_node; @@ -323,6 +299,11 @@ struct intel_engine_cs { intel_engine_mask_t saturated; /* submitting semaphores too late? 
*/ + struct { + struct delayed_work work; + struct i915_request *systole; + } heartbeat; + unsigned long serial; unsigned long wakeref_serial; @@ -473,14 +454,13 @@ struct intel_engine_cs { /* status_notifier: list of callbacks for context-switch changes */ struct atomic_notifier_head context_status_notifier; - struct intel_engine_hangcheck hangcheck; - #define I915_ENGINE_USING_CMD_PARSER BIT(0) #define I915_ENGINE_SUPPORTS_STATS BIT(1) #define I915_ENGINE_HAS_PREEMPTION BIT(2) #define I915_ENGINE_HAS_SEMAPHORES BIT(3) #define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4) #define I915_ENGINE_IS_VIRTUAL BIT(5) +#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6) #define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7) unsigned int flags; @@ -539,6 +519,13 @@ struct intel_engine_cs { */ ktime_t total; } stats; + + struct { + unsigned long heartbeat_interval_ms; + unsigned long preempt_timeout_ms; + unsigned long stop_timeout_ms; + unsigned long timeslice_duration_ms; + } props; }; static inline bool @@ -583,20 +570,24 @@ intel_engine_is_virtual(const struct intel_engine_cs *engine) return engine->flags & I915_ENGINE_IS_VIRTUAL; } -#define instdone_slice_mask(dev_priv__) \ - (IS_GEN(dev_priv__, 7) ? \ - 1 : RUNTIME_INFO(dev_priv__)->sseu.slice_mask) +static inline bool +intel_engine_has_relative_mmio(const struct intel_engine_cs * const engine) +{ + return engine->flags & I915_ENGINE_HAS_RELATIVE_MMIO; +} -#define instdone_subslice_mask(dev_priv__) \ - (IS_GEN(dev_priv__, 7) ? \ - 1 : RUNTIME_INFO(dev_priv__)->sseu.subslice_mask[0]) +#define instdone_has_slice(dev_priv___, sseu___, slice___) \ + ((IS_GEN(dev_priv___, 7) ? 1 : ((sseu___)->slice_mask)) & BIT(slice___)) -#define for_each_instdone_slice_subslice(dev_priv__, slice__, subslice__) \ - for ((slice__) = 0, (subslice__) = 0; \ - (slice__) < I915_MAX_SLICES; \ - (subslice__) = ((subslice__) + 1) < I915_MAX_SUBSLICES ? (subslice__) + 1 : 0, \ - (slice__) += ((subslice__) == 0)) \ - for_each_if((BIT(slice__) & instdone_slice_mask(dev_priv__)) && \ - (BIT(subslice__) & instdone_subslice_mask(dev_priv__))) +#define instdone_has_subslice(dev_priv__, sseu__, slice__, subslice__) \ + (IS_GEN(dev_priv__, 7) ? 
(1 & BIT(subslice__)) : \ + intel_sseu_has_subslice(sseu__, 0, subslice__)) +#define for_each_instdone_slice_subslice(dev_priv_, sseu_, slice_, subslice_) \ + for ((slice_) = 0, (subslice_) = 0; (slice_) < I915_MAX_SLICES; \ + (subslice_) = ((subslice_) + 1) % I915_MAX_SUBSLICES, \ + (slice_) += ((subslice_) == 0)) \ + for_each_if((instdone_has_slice(dev_priv_, sseu_, slice_)) && \ + (instdone_has_subslice(dev_priv_, sseu_, slice_, \ + subslice_))) #endif /* __INTEL_ENGINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c index 77cd5de83930..7f7150a733f4 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_user.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c @@ -160,10 +160,10 @@ static int legacy_ring_idx(const struct legacy_ring *ring) }; if (GEM_DEBUG_WARN_ON(ring->class >= ARRAY_SIZE(map))) - return -1; + return INVALID_ENGINE; if (GEM_DEBUG_WARN_ON(ring->instance >= map[ring->class].max)) - return -1; + return INVALID_ENGINE; return map[ring->class].base + ring->instance; } @@ -171,23 +171,15 @@ static int legacy_ring_idx(const struct legacy_ring *ring) static void add_legacy_ring(struct legacy_ring *ring, struct intel_engine_cs *engine) { - int idx; - if (engine->gt != ring->gt || engine->class != ring->class) { ring->gt = engine->gt; ring->class = engine->class; ring->instance = 0; } - idx = legacy_ring_idx(ring); - if (unlikely(idx == -1)) - return; - - GEM_BUG_ON(idx >= ARRAY_SIZE(ring->gt->engine)); - ring->gt->engine[idx] = engine; - ring->instance++; - - engine->legacy_idx = idx; + engine->legacy_idx = legacy_ring_idx(ring); + if (engine->legacy_idx != INVALID_ENGINE) + ring->instance++; } void intel_engines_driver_register(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h index 86e00a2db8a4..4294f146f13c 100644 --- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h +++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h @@ -112,6 +112,7 @@ #define MI_SEMAPHORE_SIGNAL MI_INSTR(0x1b, 0) /* GEN8+ */ #define MI_SEMAPHORE_TARGET(engine) ((engine)<<15) #define MI_SEMAPHORE_WAIT MI_INSTR(0x1c, 2) /* GEN8+ */ +#define MI_SEMAPHORE_WAIT_TOKEN MI_INSTR(0x1c, 3) /* GEN12+ */ #define MI_SEMAPHORE_POLL (1 << 15) #define MI_SEMAPHORE_SAD_GT_SDD (0 << 12) #define MI_SEMAPHORE_SAD_GTE_SDD (1 << 12) @@ -119,6 +120,8 @@ #define MI_SEMAPHORE_SAD_LTE_SDD (3 << 12) #define MI_SEMAPHORE_SAD_EQ_SDD (4 << 12) #define MI_SEMAPHORE_SAD_NEQ_SDD (5 << 12) +#define MI_SEMAPHORE_TOKEN_MASK REG_GENMASK(9, 5) +#define MI_SEMAPHORE_TOKEN_SHIFT 5 #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) #define MI_STORE_DWORD_IMM_GEN4 MI_INSTR(0x20, 2) #define MI_MEM_VIRTUAL (1 << 22) /* 945,g33,965 */ @@ -132,7 +135,10 @@ * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! */ #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*(x)-1) +/* Gen11+. addr = base + (ctx_restore ? 
offset & GENMASK(12,2) : offset) */ +#define MI_LRI_CS_MMIO (1<<19) #define MI_LRI_FORCE_POSTED (1<<12) +#define MI_LOAD_REGISTER_IMM_MAX_REGS (126) #define MI_STORE_REGISTER_MEM MI_INSTR(0x24, 1) #define MI_STORE_REGISTER_MEM_GEN8 MI_INSTR(0x24, 2) #define MI_SRM_LRM_GLOBAL_GTT (1<<22) @@ -147,6 +153,7 @@ #define MI_FLUSH_DW_USE_PPGTT (0<<2) #define MI_LOAD_REGISTER_MEM MI_INSTR(0x29, 1) #define MI_LOAD_REGISTER_MEM_GEN8 MI_INSTR(0x29, 2) +#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 1) #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) #define MI_BATCH_NON_SECURE (1) /* for snb/ivb/vlv this also means "batch in ppgtt" when ppgtt is enabled. */ @@ -156,7 +163,8 @@ #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) #define MI_BATCH_GTT (2<<6) /* aliased with (1<<7) on gen4 */ #define MI_BATCH_BUFFER_START_GEN8 MI_INSTR(0x31, 1) -#define MI_BATCH_RESOURCE_STREAMER (1<<10) +#define MI_BATCH_RESOURCE_STREAMER REG_BIT(10) +#define MI_BATCH_PREDICATE REG_BIT(15) /* HSW+ on RCS only*/ /* * 3D instructions used by the kernel @@ -217,6 +225,7 @@ #define PIPE_CONTROL_CS_STALL (1<<20) #define PIPE_CONTROL_TLB_INVALIDATE (1<<18) #define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16) +#define PIPE_CONTROL_WRITE_TIMESTAMP (3<<14) #define PIPE_CONTROL_QW_WRITE (1<<14) #define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14) #define PIPE_CONTROL_DEPTH_STALL (1<<13) @@ -224,7 +233,9 @@ #define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ #define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on ILK */ #define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ +#define PIPE_CONTROL_L3_RO_CACHE_INVALIDATE REG_BIT(10) /* gen12 */ #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) +#define PIPE_CONTROL_HDC_PIPELINE_FLUSH REG_BIT(9) /* gen12 */ #define PIPE_CONTROL_NOTIFY (1<<8) #define PIPE_CONTROL_FLUSH_ENABLE (1<<7) /* gen7+ */ #define PIPE_CONTROL_DC_FLUSH_ENABLE (1<<5) @@ -235,6 +246,29 @@ #define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ +#define MI_MATH(x) MI_INSTR(0x1a, (x) - 1) +#define MI_MATH_INSTR(opcode, op1, op2) ((opcode) << 20 | (op1) << 10 | (op2)) +/* Opcodes for MI_MATH_INSTR */ +#define MI_MATH_NOOP MI_MATH_INSTR(0x000, 0x0, 0x0) +#define MI_MATH_LOAD(op1, op2) MI_MATH_INSTR(0x080, op1, op2) +#define MI_MATH_LOADINV(op1, op2) MI_MATH_INSTR(0x480, op1, op2) +#define MI_MATH_LOAD0(op1) MI_MATH_INSTR(0x081, op1) +#define MI_MATH_LOAD1(op1) MI_MATH_INSTR(0x481, op1) +#define MI_MATH_ADD MI_MATH_INSTR(0x100, 0x0, 0x0) +#define MI_MATH_SUB MI_MATH_INSTR(0x101, 0x0, 0x0) +#define MI_MATH_AND MI_MATH_INSTR(0x102, 0x0, 0x0) +#define MI_MATH_OR MI_MATH_INSTR(0x103, 0x0, 0x0) +#define MI_MATH_XOR MI_MATH_INSTR(0x104, 0x0, 0x0) +#define MI_MATH_STORE(op1, op2) MI_MATH_INSTR(0x180, op1, op2) +#define MI_MATH_STOREINV(op1, op2) MI_MATH_INSTR(0x580, op1, op2) +/* Registers used as operands in MI_MATH_INSTR */ +#define MI_MATH_REG(x) (x) +#define MI_MATH_REG_SRCA 0x20 +#define MI_MATH_REG_SRCB 0x21 +#define MI_MATH_REG_ACCU 0x31 +#define MI_MATH_REG_ZF 0x32 +#define MI_MATH_REG_CF 0x33 + /* * Commands used only by the command parser */ @@ -251,7 +285,6 @@ #define MI_CLFLUSH MI_INSTR(0x27, 0) #define MI_REPORT_PERF_COUNT MI_INSTR(0x28, 0) #define MI_REPORT_PERF_COUNT_GGTT (1<<0) -#define MI_LOAD_REGISTER_REG MI_INSTR(0x2A, 0) #define MI_RS_STORE_DATA_IMM MI_INSTR(0x2B, 0) #define MI_LOAD_URB_MEM MI_INSTR(0x2C, 0) #define MI_STORE_URB_MEM MI_INSTR(0x2D, 0) diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 
d48ec9a76ed1..4c26daf7ee46 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -6,7 +6,12 @@ #include "i915_drv.h" #include "intel_gt.h" #include "intel_gt_pm.h" +#include "intel_gt_requests.h" +#include "intel_mocs.h" +#include "intel_rc6.h" +#include "intel_rps.h" #include "intel_uncore.h" +#include "intel_pm.h" void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) { @@ -18,15 +23,108 @@ void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915) INIT_LIST_HEAD(>->closed_vma); spin_lock_init(>->closed_lock); - intel_gt_init_hangcheck(gt); intel_gt_init_reset(gt); + intel_gt_init_requests(gt); intel_gt_pm_init_early(gt); + + intel_rps_init_early(>->rps); intel_uc_init_early(>->uc); } -void intel_gt_init_hw(struct drm_i915_private *i915) +void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt) +{ + gt->ggtt = ggtt; + + intel_gt_sanitize(gt, false); +} + +static void init_unused_ring(struct intel_gt *gt, u32 base) { - i915->gt.ggtt = &i915->ggtt; + struct intel_uncore *uncore = gt->uncore; + + intel_uncore_write(uncore, RING_CTL(base), 0); + intel_uncore_write(uncore, RING_HEAD(base), 0); + intel_uncore_write(uncore, RING_TAIL(base), 0); + intel_uncore_write(uncore, RING_START(base), 0); +} + +static void init_unused_rings(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + + if (IS_I830(i915)) { + init_unused_ring(gt, PRB1_BASE); + init_unused_ring(gt, SRB0_BASE); + init_unused_ring(gt, SRB1_BASE); + init_unused_ring(gt, SRB2_BASE); + init_unused_ring(gt, SRB3_BASE); + } else if (IS_GEN(i915, 2)) { + init_unused_ring(gt, SRB0_BASE); + init_unused_ring(gt, SRB1_BASE); + } else if (IS_GEN(i915, 3)) { + init_unused_ring(gt, PRB1_BASE); + init_unused_ring(gt, PRB2_BASE); + } +} + +int intel_gt_init_hw(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; + int ret; + + BUG_ON(!i915->kernel_context); + ret = intel_gt_terminally_wedged(gt); + if (ret) + return ret; + + gt->last_init_time = ktime_get(); + + /* Double layer security blanket, see i915_gem_init() */ + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9) + intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf)); + + if (IS_HASWELL(i915)) + intel_uncore_write(uncore, + MI_PREDICATE_RESULT_2, + IS_HSW_GT3(i915) ? + LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); + + /* Apply the GT workarounds... */ + intel_gt_apply_workarounds(gt); + /* ...and determine whether they are sticking. */ + intel_gt_verify_workarounds(gt, "init"); + + intel_gt_init_swizzling(gt); + + /* + * At least 830 can leave some of the unused rings + * "active" (ie. head != tail) after resume which + * will prevent c3 entry. Makes sure all unused rings + * are totally idle. 
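The init_unused_rings() call that follows exists because a leftover ring whose head and tail registers differ still looks busy to the hardware and blocks C3 entry after resume. A hedged illustration of that idleness condition, reusing the existing ring register macros; the helper itself is invented for this example and is not part of the driver:

#include "i915_drv.h"
#include "intel_uncore.h"

/* Illustrative only: a stale ring looks "active" while HEAD != TAIL. */
static bool legacy_ring_is_quiesced(struct intel_uncore *uncore, u32 base)
{
	u32 head = intel_uncore_read(uncore, RING_HEAD(base)) & HEAD_ADDR;
	u32 tail = intel_uncore_read(uncore, RING_TAIL(base)) & TAIL_ADDR;

	return head == tail;
}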
+ */ + init_unused_rings(gt); + + ret = i915_ppgtt_init_hw(gt); + if (ret) { + DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); + goto out; + } + + /* We can't enable contexts until all firmware is loaded */ + ret = intel_uc_init_hw(>->uc); + if (ret) { + i915_probe_error(i915, "Enabling uc failed (%d)\n", ret); + goto out; + } + + intel_mocs_init(gt); + +out: + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + return ret; } static void rmw_set(struct intel_uncore *uncore, i915_reg_t reg, u32 set) @@ -89,7 +187,7 @@ intel_gt_clear_error_registers(struct intel_gt *gt, struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine_masked(engine, i915, engine_mask, id) + for_each_engine_masked(engine, gt, engine_mask, id) gen8_clear_engine_error_register(engine); } } @@ -100,7 +198,7 @@ static void gen6_check_faults(struct intel_gt *gt) enum intel_engine_id id; u32 fault; - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { fault = GEN6_RING_FAULT_REG_READ(engine); if (fault & RING_FAULT_VALID) { DRM_DEBUG_DRIVER("Unexpected fault\n" @@ -176,7 +274,7 @@ void intel_gt_check_and_clear_faults(struct intel_gt *gt) void intel_gt_flush_ggtt_writes(struct intel_gt *gt) { - struct drm_i915_private *i915 = gt->i915; + struct intel_uncore *uncore = gt->uncore; intel_wakeref_t wakeref; /* @@ -200,18 +298,18 @@ void intel_gt_flush_ggtt_writes(struct intel_gt *gt) wmb(); - if (INTEL_INFO(i915)->has_coherent_ggtt) + if (INTEL_INFO(gt->i915)->has_coherent_ggtt) return; intel_gt_chipset_flush(gt); - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - struct intel_uncore *uncore = gt->uncore; + with_intel_runtime_pm(uncore->rpm, wakeref) { + unsigned long flags; - spin_lock_irq(&uncore->lock); + spin_lock_irqsave(&uncore->lock, flags); intel_uncore_posting_read_fw(uncore, RING_HEAD(RENDER_RING_BASE)); - spin_unlock_irq(&uncore->lock); + spin_unlock_irqrestore(&uncore->lock, flags); } } @@ -222,7 +320,12 @@ void intel_gt_chipset_flush(struct intel_gt *gt) intel_gtt_chipset_flush(); } -int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) +void intel_gt_driver_register(struct intel_gt *gt) +{ + intel_rps_driver_register(>->rps); +} + +static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) { struct drm_i915_private *i915 = gt->i915; struct drm_i915_gem_object *obj; @@ -230,7 +333,7 @@ int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size) int ret; obj = i915_gem_object_create_stolen(i915, size); - if (!obj) + if (IS_ERR(obj)) obj = i915_gem_object_create_internal(i915, size); if (IS_ERR(obj)) { DRM_ERROR("Failed to allocate scratch page\n"); @@ -256,11 +359,40 @@ err_unref: return ret; } -void intel_gt_fini_scratch(struct intel_gt *gt) +static void intel_gt_fini_scratch(struct intel_gt *gt) { i915_vma_unpin_and_release(>->scratch, 0); } +int intel_gt_init(struct intel_gt *gt) +{ + int err; + + err = intel_gt_init_scratch(gt, IS_GEN(gt->i915, 2) ? 
SZ_256K : SZ_4K); + if (err) + return err; + + intel_gt_pm_init(gt); + + return 0; +} + +void intel_gt_driver_remove(struct intel_gt *gt) +{ + GEM_BUG_ON(gt->awake); +} + +void intel_gt_driver_unregister(struct intel_gt *gt) +{ + intel_rps_driver_unregister(>->rps); +} + +void intel_gt_driver_release(struct intel_gt *gt) +{ + intel_gt_pm_fini(gt); + intel_gt_fini_scratch(gt); +} + void intel_gt_driver_late_release(struct intel_gt *gt) { intel_uc_driver_late_release(>->uc); diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h index 4920cb351f10..5436f8c30708 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.h +++ b/drivers/gpu/drm/i915/gt/intel_gt.h @@ -28,7 +28,14 @@ static inline struct intel_gt *huc_to_gt(struct intel_huc *huc) } void intel_gt_init_early(struct intel_gt *gt, struct drm_i915_private *i915); -void intel_gt_init_hw(struct drm_i915_private *i915); +void intel_gt_init_hw_early(struct intel_gt *gt, struct i915_ggtt *ggtt); +int __must_check intel_gt_init_hw(struct intel_gt *gt); +int intel_gt_init(struct intel_gt *gt); +void intel_gt_driver_register(struct intel_gt *gt); + +void intel_gt_driver_unregister(struct intel_gt *gt); +void intel_gt_driver_remove(struct intel_gt *gt); +void intel_gt_driver_release(struct intel_gt *gt); void intel_gt_driver_late_release(struct intel_gt *gt); @@ -39,11 +46,6 @@ void intel_gt_clear_error_registers(struct intel_gt *gt, void intel_gt_flush_ggtt_writes(struct intel_gt *gt); void intel_gt_chipset_flush(struct intel_gt *gt); -void intel_gt_init_hangcheck(struct intel_gt *gt); - -int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size); -void intel_gt_fini_scratch(struct intel_gt *gt); - static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt, enum intel_gt_scratch_field field) { @@ -55,6 +57,4 @@ static inline bool intel_gt_is_wedged(struct intel_gt *gt) return __intel_reset_failed(>->reset); } -void intel_gt_queue_hangcheck(struct intel_gt *gt); - #endif /* __INTEL_GT_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c index 34a4fb624bf7..973ee7eded64 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c @@ -11,6 +11,7 @@ #include "intel_gt.h" #include "intel_gt_irq.h" #include "intel_uncore.h" +#include "intel_rps.h" static void guc_irq_handler(struct intel_guc *guc, u16 iir) { @@ -77,7 +78,7 @@ gen11_other_irq_handler(struct intel_gt *gt, const u8 instance, return guc_irq_handler(>->uc.guc, iir); if (instance == OTHER_GTPM_INSTANCE) - return gen11_rps_irq_handler(gt, iir); + return gen11_rps_irq_handler(>->rps, iir); WARN_ONCE(1, "unhandled other interrupt instance=0x%x, iir=0x%x\n", instance, iir); @@ -336,7 +337,7 @@ void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]) } if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { - gen6_rps_irq_handler(gt->i915, gt_iir[2]); + gen6_rps_irq_handler(>->rps, gt_iir[2]); guc_irq_handler(>->uc.guc, gt_iir[2] >> 16); } } diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c index fac75afed35b..6187cdd06646 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c @@ -4,17 +4,38 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/suspend.h> + #include "i915_drv.h" +#include "i915_globals.h" #include "i915_params.h" +#include "intel_context.h" #include "intel_engine_pm.h" #include "intel_gt.h" #include "intel_gt_pm.h" +#include "intel_gt_requests.h" +#include 
"intel_llc.h" #include "intel_pm.h" +#include "intel_rc6.h" +#include "intel_rps.h" #include "intel_wakeref.h" -static void pm_notify(struct drm_i915_private *i915, int state) +static void user_forcewake(struct intel_gt *gt, bool suspend) { - blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915); + int count = atomic_read(>->user_wakeref); + + /* Inside suspend/resume so single threaded, no races to worry about. */ + if (likely(!count)) + return; + + intel_gt_pm_get(gt); + if (suspend) { + GEM_BUG_ON(count > atomic_read(>->wakeref.count)); + atomic_sub(count, >->wakeref.count); + } else { + atomic_add(count, >->wakeref.count); + } + intel_gt_pm_put(gt); } static int __gt_unpark(struct intel_wakeref *wf) @@ -24,6 +45,8 @@ static int __gt_unpark(struct intel_wakeref *wf) GEM_TRACE("\n"); + i915_globals_unpark(); + /* * It seems that the DMC likes to transition between the DC states a lot * when there are no connected displays (no active power domains) during @@ -41,46 +64,41 @@ static int __gt_unpark(struct intel_wakeref *wf) if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); - intel_enable_gt_powersave(i915); - - i915_update_gfx_val(i915); - if (INTEL_GEN(i915) >= 6) - gen6_rps_busy(i915); - + intel_rps_unpark(>->rps); i915_pmu_gt_unparked(i915); - intel_gt_queue_hangcheck(gt); - - pm_notify(i915, INTEL_GT_UNPARK); + intel_gt_unpark_requests(gt); return 0; } static int __gt_park(struct intel_wakeref *wf) { - struct drm_i915_private *i915 = - container_of(wf, typeof(*i915), gt.wakeref); - intel_wakeref_t wakeref = fetch_and_zero(&i915->gt.awake); + struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref); + intel_wakeref_t wakeref = fetch_and_zero(>->awake); + struct drm_i915_private *i915 = gt->i915; GEM_TRACE("\n"); - pm_notify(i915, INTEL_GT_PARK); + intel_gt_park_requests(gt); + i915_vma_parked(gt); i915_pmu_gt_parked(i915); - if (INTEL_GEN(i915) >= 6) - gen6_rps_idle(i915); + intel_rps_park(>->rps); + + /* Everything switched off, flush any residual interrupt just in case */ + intel_synchronize_irq(i915); if (NEEDS_RC6_CTX_CORRUPTION_WA(i915)) { - i915_rc6_ctx_wa_check(i915); + intel_rc6_ctx_wa_check(&i915->gt.rc6); intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); } - /* Everything switched off, flush any residual interrupt just in case */ - intel_synchronize_irq(i915); - GEM_BUG_ON(!wakeref); intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref); + i915_globals_park(); + return 0; } @@ -92,9 +110,18 @@ static const struct intel_wakeref_ops wf_ops = { void intel_gt_pm_init_early(struct intel_gt *gt) { - intel_wakeref_init(>->wakeref, >->i915->runtime_pm, &wf_ops); + intel_wakeref_init(>->wakeref, gt->uncore->rpm, &wf_ops); +} - BLOCKING_INIT_NOTIFIER_HEAD(>->pm_notifications); +void intel_gt_pm_init(struct intel_gt *gt) +{ + /* + * Enabling power-management should be "self-healing". If we cannot + * enable a feature, simply leave it disabled with a notice to the + * user. 
+ */ + intel_rc6_init(>->rc6); + intel_rps_init(>->rps); } static bool reset_engines(struct intel_gt *gt) @@ -119,16 +146,47 @@ void intel_gt_sanitize(struct intel_gt *gt, bool force) { struct intel_engine_cs *engine; enum intel_engine_id id; + intel_wakeref_t wakeref; - GEM_TRACE("\n"); + GEM_TRACE("force:%s\n", yesno(force)); + + /* Use a raw wakeref to avoid calling intel_display_power_get early */ + wakeref = intel_runtime_pm_get(gt->uncore->rpm); + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + + /* + * As we have just resumed the machine and woken the device up from + * deep PCI sleep (presumably D3_cold), assume the HW has been reset + * back to defaults, recovering from whatever wedged state we left it + * in and so worth trying to use the device once more. + */ + if (intel_gt_is_wedged(gt)) + intel_gt_unset_wedged(gt); intel_uc_sanitize(>->uc); - if (!reset_engines(gt) && !force) - return; + for_each_engine(engine, gt, id) + if (engine->reset.prepare) + engine->reset.prepare(engine); - for_each_engine(engine, gt->i915, id) - __intel_engine_reset(engine, false); + intel_uc_reset_prepare(>->uc); + + if (reset_engines(gt) || force) { + for_each_engine(engine, gt, id) + __intel_engine_reset(engine, false); + } + + for_each_engine(engine, gt, id) + if (engine->reset.finish) + engine->reset.finish(engine); + + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); +} + +void intel_gt_pm_fini(struct intel_gt *gt) +{ + intel_rc6_fini(>->rc6); } int intel_gt_resume(struct intel_gt *gt) @@ -137,6 +195,8 @@ int intel_gt_resume(struct intel_gt *gt) enum intel_engine_id id; int err = 0; + GEM_TRACE("\n"); + /* * After resume, we may need to poke into the pinned kernel * contexts to paper over any damage caused by the sudden suspend. @@ -144,14 +204,23 @@ int intel_gt_resume(struct intel_gt *gt) * allowing us to fixup the user contexts on their first pin. */ intel_gt_pm_get(gt); - for_each_engine(engine, gt->i915, id) { + + intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL); + intel_rc6_sanitize(>->rc6); + + intel_rps_enable(>->rps); + intel_llc_enable(>->llc); + + for_each_engine(engine, gt, id) { struct intel_context *ce; intel_engine_pm_get(engine); ce = engine->kernel_context; - if (ce) + if (ce) { + GEM_BUG_ON(!intel_context_is_pinned(ce)); ce->ops->reset(ce); + } engine->serial++; /* kernel context lost */ err = engine->resume(engine); @@ -164,19 +233,99 @@ int intel_gt_resume(struct intel_gt *gt) break; } } + + intel_rc6_enable(>->rc6); + + intel_uc_resume(>->uc); + + user_forcewake(gt, false); + + intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL); intel_gt_pm_put(gt); return err; } +static void wait_for_suspend(struct intel_gt *gt) +{ + if (!intel_gt_pm_is_awake(gt)) + return; + + if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) { + /* + * Forcibly cancel outstanding work and leave + * the gpu quiet. 
+ */ + intel_gt_set_wedged(gt); + } + + intel_gt_pm_wait_for_idle(gt); +} + +void intel_gt_suspend_prepare(struct intel_gt *gt) +{ + user_forcewake(gt, true); + wait_for_suspend(gt); + + intel_uc_suspend(>->uc); +} + +static suspend_state_t pm_suspend_target(void) +{ +#if IS_ENABLED(CONFIG_PM_SLEEP) + return pm_suspend_target_state; +#else + return PM_SUSPEND_TO_IDLE; +#endif +} + +void intel_gt_suspend_late(struct intel_gt *gt) +{ + intel_wakeref_t wakeref; + + /* We expect to be idle already; but also want to be independent */ + wait_for_suspend(gt); + + /* + * On disabling the device, we want to turn off HW access to memory + * that we no longer own. + * + * However, not all suspend-states disable the device. S0 (s2idle) + * is effectively runtime-suspend, the device is left powered on + * but needs to be put into a low power state. We need to keep + * powermanagement enabled, but we also retain system state and so + * it remains safe to keep on using our allocated memory. + */ + if (pm_suspend_target() == PM_SUSPEND_TO_IDLE) + return; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) { + intel_rps_disable(>->rps); + intel_rc6_disable(>->rc6); + intel_llc_disable(>->llc); + } + + intel_gt_sanitize(gt, false); + + GEM_TRACE("\n"); +} + void intel_gt_runtime_suspend(struct intel_gt *gt) { intel_uc_runtime_suspend(>->uc); + + GEM_TRACE("\n"); } int intel_gt_runtime_resume(struct intel_gt *gt) { + GEM_TRACE("\n"); + intel_gt_init_swizzling(gt); return intel_uc_runtime_resume(>->uc); } + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_gt_pm.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.h b/drivers/gpu/drm/i915/gt/intel_gt_pm.h index fb39d99cd6ee..b3e17399be9b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.h @@ -12,11 +12,6 @@ #include "intel_gt_types.h" #include "intel_wakeref.h" -enum { - INTEL_GT_UNPARK, - INTEL_GT_PARK, -}; - static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt) { return intel_wakeref_is_active(>->wakeref); @@ -43,10 +38,21 @@ static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt) } void intel_gt_pm_init_early(struct intel_gt *gt); +void intel_gt_pm_init(struct intel_gt *gt); +void intel_gt_pm_fini(struct intel_gt *gt); void intel_gt_sanitize(struct intel_gt *gt, bool force); + +void intel_gt_suspend_prepare(struct intel_gt *gt); +void intel_gt_suspend_late(struct intel_gt *gt); int intel_gt_resume(struct intel_gt *gt); + void intel_gt_runtime_suspend(struct intel_gt *gt); int intel_gt_runtime_resume(struct intel_gt *gt); +static inline bool is_mock_gt(const struct intel_gt *gt) +{ + return I915_SELFTEST_ONLY(gt->awake == -ENODEV); +} + #endif /* INTEL_GT_PM_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c new file mode 100644 index 000000000000..353809ac2754 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c @@ -0,0 +1,137 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" /* for_each_engine() */ +#include "i915_request.h" +#include "intel_gt.h" +#include "intel_gt_pm.h" +#include "intel_gt_requests.h" +#include "intel_timeline.h" + +static void retire_requests(struct intel_timeline *tl) +{ + struct i915_request *rq, *rn; + + list_for_each_entry_safe(rq, rn, &tl->requests, link) + if (!i915_request_retire(rq)) + break; +} + +static void flush_submission(struct intel_gt *gt) +{ + struct intel_engine_cs *engine; + enum intel_engine_id id; + + 
for_each_engine(engine, gt, id) + intel_engine_flush_submission(engine); +} + +long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout) +{ + struct intel_gt_timelines *timelines = >->timelines; + struct intel_timeline *tl, *tn; + unsigned long active_count = 0; + unsigned long flags; + bool interruptible; + LIST_HEAD(free); + + interruptible = true; + if (unlikely(timeout < 0)) + timeout = -timeout, interruptible = false; + + flush_submission(gt); /* kick the ksoftirqd tasklets */ + + spin_lock_irqsave(&timelines->lock, flags); + list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { + if (!mutex_trylock(&tl->mutex)) { + active_count++; /* report busy to caller, try again? */ + continue; + } + + intel_timeline_get(tl); + GEM_BUG_ON(!tl->active_count); + tl->active_count++; /* pin the list element */ + spin_unlock_irqrestore(&timelines->lock, flags); + + if (timeout > 0) { + struct dma_fence *fence; + + fence = i915_active_fence_get(&tl->last_request); + if (fence) { + timeout = dma_fence_wait_timeout(fence, + interruptible, + timeout); + dma_fence_put(fence); + } + } + + retire_requests(tl); + + spin_lock_irqsave(&timelines->lock, flags); + + /* Resume iteration after dropping lock */ + list_safe_reset_next(tl, tn, link); + if (!--tl->active_count) + list_del(&tl->link); + else + active_count += !!rcu_access_pointer(tl->last_request.fence); + + mutex_unlock(&tl->mutex); + + /* Defer the final release to after the spinlock */ + if (refcount_dec_and_test(&tl->kref.refcount)) { + GEM_BUG_ON(tl->active_count); + list_add(&tl->link, &free); + } + } + spin_unlock_irqrestore(&timelines->lock, flags); + + list_for_each_entry_safe(tl, tn, &free, link) + __intel_timeline_free(&tl->kref); + + return active_count ? timeout : 0; +} + +int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout) +{ + /* If the device is asleep, we have no requests outstanding */ + if (!intel_gt_pm_is_awake(gt)) + return 0; + + while ((timeout = intel_gt_retire_requests_timeout(gt, timeout)) > 0) { + cond_resched(); + if (signal_pending(current)) + return -EINTR; + } + + return timeout; +} + +static void retire_work_handler(struct work_struct *work) +{ + struct intel_gt *gt = + container_of(work, typeof(*gt), requests.retire_work.work); + + intel_gt_retire_requests(gt); + schedule_delayed_work(>->requests.retire_work, + round_jiffies_up_relative(HZ)); +} + +void intel_gt_init_requests(struct intel_gt *gt) +{ + INIT_DELAYED_WORK(>->requests.retire_work, retire_work_handler); +} + +void intel_gt_park_requests(struct intel_gt *gt) +{ + cancel_delayed_work(>->requests.retire_work); +} + +void intel_gt_unpark_requests(struct intel_gt *gt) +{ + schedule_delayed_work(>->requests.retire_work, + round_jiffies_up_relative(HZ)); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.h b/drivers/gpu/drm/i915/gt/intel_gt_requests.h new file mode 100644 index 000000000000..bd31cbce47e0 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.h @@ -0,0 +1,24 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_GT_REQUESTS_H +#define INTEL_GT_REQUESTS_H + +struct intel_gt; + +long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout); +static inline void intel_gt_retire_requests(struct intel_gt *gt) +{ + intel_gt_retire_requests_timeout(gt, 0); +} + +int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout); + +void intel_gt_init_requests(struct intel_gt *gt); +void intel_gt_park_requests(struct intel_gt *gt); +void 
intel_gt_unpark_requests(struct intel_gt *gt); + +#endif /* INTEL_GT_REQUESTS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index dc295c196d11..d4e14dbd172e 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -17,7 +17,10 @@ #include "i915_vma.h" #include "intel_engine_types.h" +#include "intel_llc_types.h" #include "intel_reset_types.h" +#include "intel_rc6_types.h" +#include "intel_rps_types.h" #include "intel_wakeref.h" struct drm_i915_private; @@ -25,14 +28,6 @@ struct i915_ggtt; struct intel_engine_cs; struct intel_uncore; -struct intel_hangcheck { - /* For hangcheck timer */ -#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */ -#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) - - struct delayed_work work; -}; - struct intel_gt { struct drm_i915_private *i915; struct intel_uncore *uncore; @@ -49,12 +44,23 @@ struct intel_gt { struct list_head hwsp_free_list; } timelines; + struct intel_gt_requests { + /** + * We leave the user IRQ off as much as possible, + * but this means that requests will finish and never + * be retired once the system goes idle. Set a timer to + * fire periodically while the ring is running. When it + * fires, go retire requests. + */ + struct delayed_work retire_work; + } requests; + struct intel_wakeref wakeref; + atomic_t user_wakeref; struct list_head closed_vma; spinlock_t closed_lock; /* guards the list of closed_vma */ - struct intel_hangcheck hangcheck; struct intel_reset reset; /** @@ -66,7 +72,9 @@ struct intel_gt { */ intel_wakeref_t awake; - struct blocking_notifier_head pm_notifications; + struct intel_llc llc; + struct intel_rc6 rc6; + struct intel_rps rps; ktime_t last_init_time; @@ -89,14 +97,16 @@ enum intel_gt_scratch_field { INTEL_GT_SCRATCH_FIELD_DEFAULT = 0, /* 8 bytes */ - INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA = 128, - - /* 8 bytes */ INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128, /* 8 bytes */ INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256, + /* 6 * 8 bytes */ + INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048, + + /* 4 bytes */ + INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096, }; #endif /* __INTEL_GT_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c deleted file mode 100644 index 05d042cdefe2..000000000000 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ /dev/null @@ -1,360 +0,0 @@ -/* - * Copyright © 2016 Intel Corporation - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice (including the next - * paragraph) shall be included in all copies or substantial portions of the - * Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - * - */ - -#include "i915_drv.h" -#include "intel_engine.h" -#include "intel_gt.h" -#include "intel_reset.h" - -struct hangcheck { - u64 acthd; - u32 ring; - u32 head; - enum intel_engine_hangcheck_action action; - unsigned long action_timestamp; - int deadlock; - struct intel_instdone instdone; - bool wedged:1; - bool stalled:1; -}; - -static bool instdone_unchanged(u32 current_instdone, u32 *old_instdone) -{ - u32 tmp = current_instdone | *old_instdone; - bool unchanged; - - unchanged = tmp == *old_instdone; - *old_instdone |= tmp; - - return unchanged; -} - -static bool subunits_stuck(struct intel_engine_cs *engine) -{ - struct drm_i915_private *dev_priv = engine->i915; - struct intel_instdone instdone; - struct intel_instdone *accu_instdone = &engine->hangcheck.instdone; - bool stuck; - int slice; - int subslice; - - intel_engine_get_instdone(engine, &instdone); - - /* There might be unstable subunit states even when - * actual head is not moving. Filter out the unstable ones by - * accumulating the undone -> done transitions and only - * consider those as progress. - */ - stuck = instdone_unchanged(instdone.instdone, - &accu_instdone->instdone); - stuck &= instdone_unchanged(instdone.slice_common, - &accu_instdone->slice_common); - - for_each_instdone_slice_subslice(dev_priv, slice, subslice) { - stuck &= instdone_unchanged(instdone.sampler[slice][subslice], - &accu_instdone->sampler[slice][subslice]); - stuck &= instdone_unchanged(instdone.row[slice][subslice], - &accu_instdone->row[slice][subslice]); - } - - return stuck; -} - -static enum intel_engine_hangcheck_action -head_stuck(struct intel_engine_cs *engine, u64 acthd) -{ - if (acthd != engine->hangcheck.acthd) { - - /* Clear subunit states on head movement */ - memset(&engine->hangcheck.instdone, 0, - sizeof(engine->hangcheck.instdone)); - - return ENGINE_ACTIVE_HEAD; - } - - if (!subunits_stuck(engine)) - return ENGINE_ACTIVE_SUBUNITS; - - return ENGINE_DEAD; -} - -static enum intel_engine_hangcheck_action -engine_stuck(struct intel_engine_cs *engine, u64 acthd) -{ - enum intel_engine_hangcheck_action ha; - u32 tmp; - - ha = head_stuck(engine, acthd); - if (ha != ENGINE_DEAD) - return ha; - - if (IS_GEN(engine->i915, 2)) - return ENGINE_DEAD; - - /* Is the chip hanging on a WAIT_FOR_EVENT? - * If so we can simply poke the RB_WAIT bit - * and break the hang. This should work on - * all but the second generation chipsets. 
- */ - tmp = ENGINE_READ(engine, RING_CTL); - if (tmp & RING_WAIT) { - intel_gt_handle_error(engine->gt, engine->mask, 0, - "stuck wait on %s", engine->name); - ENGINE_WRITE(engine, RING_CTL, tmp); - return ENGINE_WAIT_KICK; - } - - return ENGINE_DEAD; -} - -static void hangcheck_load_sample(struct intel_engine_cs *engine, - struct hangcheck *hc) -{ - hc->acthd = intel_engine_get_active_head(engine); - hc->ring = ENGINE_READ(engine, RING_START); - hc->head = ENGINE_READ(engine, RING_HEAD); -} - -static void hangcheck_store_sample(struct intel_engine_cs *engine, - const struct hangcheck *hc) -{ - engine->hangcheck.acthd = hc->acthd; - engine->hangcheck.last_ring = hc->ring; - engine->hangcheck.last_head = hc->head; -} - -static enum intel_engine_hangcheck_action -hangcheck_get_action(struct intel_engine_cs *engine, - const struct hangcheck *hc) -{ - if (intel_engine_is_idle(engine)) - return ENGINE_IDLE; - - if (engine->hangcheck.last_ring != hc->ring) - return ENGINE_ACTIVE_SEQNO; - - if (engine->hangcheck.last_head != hc->head) - return ENGINE_ACTIVE_SEQNO; - - return engine_stuck(engine, hc->acthd); -} - -static void hangcheck_accumulate_sample(struct intel_engine_cs *engine, - struct hangcheck *hc) -{ - unsigned long timeout = I915_ENGINE_DEAD_TIMEOUT; - - hc->action = hangcheck_get_action(engine, hc); - - /* We always increment the progress - * if the engine is busy and still processing - * the same request, so that no single request - * can run indefinitely (such as a chain of - * batches). The only time we do not increment - * the hangcheck score on this ring, if this - * engine is in a legitimate wait for another - * engine. In that case the waiting engine is a - * victim and we want to be sure we catch the - * right culprit. Then every time we do kick - * the ring, make it as a progress as the seqno - * advancement might ensure and if not, it - * will catch the hanging engine. - */ - - switch (hc->action) { - case ENGINE_IDLE: - case ENGINE_ACTIVE_SEQNO: - /* Clear head and subunit states on seqno movement */ - hc->acthd = 0; - - memset(&engine->hangcheck.instdone, 0, - sizeof(engine->hangcheck.instdone)); - - /* Intentional fall through */ - case ENGINE_WAIT_KICK: - case ENGINE_WAIT: - engine->hangcheck.action_timestamp = jiffies; - break; - - case ENGINE_ACTIVE_HEAD: - case ENGINE_ACTIVE_SUBUNITS: - /* - * Seqno stuck with still active engine gets leeway, - * in hopes that it is just a long shader. - */ - timeout = I915_SEQNO_DEAD_TIMEOUT; - break; - - case ENGINE_DEAD: - break; - - default: - MISSING_CASE(hc->action); - } - - hc->stalled = time_after(jiffies, - engine->hangcheck.action_timestamp + timeout); - hc->wedged = time_after(jiffies, - engine->hangcheck.action_timestamp + - I915_ENGINE_WEDGED_TIMEOUT); -} - -static void hangcheck_declare_hang(struct intel_gt *gt, - intel_engine_mask_t hung, - intel_engine_mask_t stuck) -{ - struct intel_engine_cs *engine; - intel_engine_mask_t tmp; - char msg[80]; - int len; - - /* If some rings hung but others were still busy, only - * blame the hanging rings in the synopsis. - */ - if (stuck != hung) - hung &= ~stuck; - len = scnprintf(msg, sizeof(msg), - "%s on ", stuck == hung ? "no progress" : "hang"); - for_each_engine_masked(engine, gt->i915, hung, tmp) - len += scnprintf(msg + len, sizeof(msg) - len, - "%s, ", engine->name); - msg[len-2] = '\0'; - - return intel_gt_handle_error(gt, hung, I915_ERROR_CAPTURE, "%s", msg); -} - -/* - * This is called when the chip hasn't reported back with completed - * batchbuffers in a long time. 
We keep track per ring seqno progress and - * if there are no progress, hangcheck score for that ring is increased. - * Further, acthd is inspected to see if the ring is stuck. On stuck case - * we kick the ring. If we see no progress on three subsequent calls - * we assume chip is wedged and try to fix it by resetting the chip. - */ -static void hangcheck_elapsed(struct work_struct *work) -{ - struct intel_gt *gt = - container_of(work, typeof(*gt), hangcheck.work.work); - intel_engine_mask_t hung = 0, stuck = 0, wedged = 0; - struct intel_engine_cs *engine; - enum intel_engine_id id; - intel_wakeref_t wakeref; - - if (!i915_modparams.enable_hangcheck) - return; - - if (!READ_ONCE(gt->awake)) - return; - - if (intel_gt_is_wedged(gt)) - return; - - wakeref = intel_runtime_pm_get_if_in_use(>->i915->runtime_pm); - if (!wakeref) - return; - - /* As enabling the GPU requires fairly extensive mmio access, - * periodically arm the mmio checker to see if we are triggering - * any invalid access. - */ - intel_uncore_arm_unclaimed_mmio_detection(gt->uncore); - - for_each_engine(engine, gt->i915, id) { - struct hangcheck hc; - - intel_engine_signal_breadcrumbs(engine); - - hangcheck_load_sample(engine, &hc); - hangcheck_accumulate_sample(engine, &hc); - hangcheck_store_sample(engine, &hc); - - if (hc.stalled) { - hung |= engine->mask; - if (hc.action != ENGINE_DEAD) - stuck |= engine->mask; - } - - if (hc.wedged) - wedged |= engine->mask; - } - - if (GEM_SHOW_DEBUG() && (hung | stuck)) { - struct drm_printer p = drm_debug_printer("hangcheck"); - - for_each_engine(engine, gt->i915, id) { - if (intel_engine_is_idle(engine)) - continue; - - intel_engine_dump(engine, &p, "%s\n", engine->name); - } - } - - if (wedged) { - dev_err(gt->i915->drm.dev, - "GPU recovery timed out," - " cancelling all in-flight rendering.\n"); - GEM_TRACE_DUMP(); - intel_gt_set_wedged(gt); - } - - if (hung) - hangcheck_declare_hang(gt, hung, stuck); - - intel_runtime_pm_put(>->i915->runtime_pm, wakeref); - - /* Reset timer in case GPU hangs without another request being added */ - intel_gt_queue_hangcheck(gt); -} - -void intel_gt_queue_hangcheck(struct intel_gt *gt) -{ - unsigned long delay; - - if (unlikely(!i915_modparams.enable_hangcheck)) - return; - - /* - * Don't continually defer the hangcheck so that it is always run at - * least once after work has been scheduled on any ring. Otherwise, - * we will ignore a hung ring if a second ring is kept busy. 
- */ - - delay = round_jiffies_up_relative(DRM_I915_HANGCHECK_JIFFIES); - queue_delayed_work(system_long_wq, >->hangcheck.work, delay); -} - -void intel_engine_init_hangcheck(struct intel_engine_cs *engine) -{ - memset(&engine->hangcheck, 0, sizeof(engine->hangcheck)); - engine->hangcheck.action_timestamp = jiffies; -} - -void intel_gt_init_hangcheck(struct intel_gt *gt) -{ - INIT_DELAYED_WORK(>->hangcheck.work, hangcheck_elapsed); -} - -#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) -#include "selftest_hangcheck.c" -#endif diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c new file mode 100644 index 000000000000..ceb785b75c25 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_llc.c @@ -0,0 +1,161 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include <linux/cpufreq.h> + +#include "i915_drv.h" +#include "intel_gt.h" +#include "intel_llc.h" +#include "intel_sideband.h" + +struct ia_constants { + unsigned int min_gpu_freq; + unsigned int max_gpu_freq; + + unsigned int min_ring_freq; + unsigned int max_ia_freq; +}; + +static struct intel_gt *llc_to_gt(struct intel_llc *llc) +{ + return container_of(llc, struct intel_gt, llc); +} + +static unsigned int cpu_max_MHz(void) +{ + struct cpufreq_policy *policy; + unsigned int max_khz; + + policy = cpufreq_cpu_get(0); + if (policy) { + max_khz = policy->cpuinfo.max_freq; + cpufreq_cpu_put(policy); + } else { + /* + * Default to measured freq if none found, PCU will ensure we + * don't go over + */ + max_khz = tsc_khz; + } + + return max_khz / 1000; +} + +static bool get_ia_constants(struct intel_llc *llc, + struct ia_constants *consts) +{ + struct drm_i915_private *i915 = llc_to_gt(llc)->i915; + struct intel_rps *rps = &llc_to_gt(llc)->rps; + + if (rps->max_freq <= rps->min_freq) + return false; + + consts->max_ia_freq = cpu_max_MHz(); + + consts->min_ring_freq = + intel_uncore_read(llc_to_gt(llc)->uncore, DCLK) & 0xf; + /* convert DDR frequency from units of 266.6MHz to bandwidth */ + consts->min_ring_freq = mult_frac(consts->min_ring_freq, 8, 3); + + consts->min_gpu_freq = rps->min_freq; + consts->max_gpu_freq = rps->max_freq; + if (INTEL_GEN(i915) >= 9) { + /* Convert GT frequency to 50 HZ units */ + consts->min_gpu_freq /= GEN9_FREQ_SCALER; + consts->max_gpu_freq /= GEN9_FREQ_SCALER; + } + + return true; +} + +static void calc_ia_freq(struct intel_llc *llc, + unsigned int gpu_freq, + const struct ia_constants *consts, + unsigned int *out_ia_freq, + unsigned int *out_ring_freq) +{ + struct drm_i915_private *i915 = llc_to_gt(llc)->i915; + const int diff = consts->max_gpu_freq - gpu_freq; + unsigned int ia_freq = 0, ring_freq = 0; + + if (INTEL_GEN(i915) >= 9) { + /* + * ring_freq = 2 * GT. ring_freq is in 100MHz units + * No floor required for ring frequency on SKL. + */ + ring_freq = gpu_freq; + } else if (INTEL_GEN(i915) >= 8) { + /* max(2 * GT, DDR). NB: GT is 50MHz units */ + ring_freq = max(consts->min_ring_freq, gpu_freq); + } else if (IS_HASWELL(i915)) { + ring_freq = mult_frac(gpu_freq, 5, 4); + ring_freq = max(consts->min_ring_freq, ring_freq); + /* leave ia_freq as the default, chosen by cpufreq */ + } else { + const int min_freq = 15; + const int scale = 180; + + /* + * On older processors, there is no separate ring + * clock domain, so in order to boost the bandwidth + * of the ring, we need to upclock the CPU (ia_freq). + * + * For GPU frequencies less than 750MHz, + * just use the lowest ring freq. 
+ */ + if (gpu_freq < min_freq) + ia_freq = 800; + else + ia_freq = consts->max_ia_freq - diff * scale / 2; + ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); + } + + *out_ia_freq = ia_freq; + *out_ring_freq = ring_freq; +} + +static void gen6_update_ring_freq(struct intel_llc *llc) +{ + struct drm_i915_private *i915 = llc_to_gt(llc)->i915; + struct ia_constants consts; + unsigned int gpu_freq; + + if (!get_ia_constants(llc, &consts)) + return; + + /* + * For each potential GPU frequency, load a ring frequency we'd like + * to use for memory access. We do this by specifying the IA frequency + * the PCU should use as a reference to determine the ring frequency. + */ + for (gpu_freq = consts.max_gpu_freq; + gpu_freq >= consts.min_gpu_freq; + gpu_freq--) { + unsigned int ia_freq, ring_freq; + + calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq); + sandybridge_pcode_write(i915, + GEN6_PCODE_WRITE_MIN_FREQ_TABLE, + ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | + ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | + gpu_freq); + } +} + +void intel_llc_enable(struct intel_llc *llc) +{ + if (HAS_LLC(llc_to_gt(llc)->i915)) + gen6_update_ring_freq(llc); +} + +void intel_llc_disable(struct intel_llc *llc) +{ + /* Currently there is no HW configuration to be done to disable. */ +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftest_llc.c" +#endif diff --git a/drivers/gpu/drm/i915/gt/intel_llc.h b/drivers/gpu/drm/i915/gt/intel_llc.h new file mode 100644 index 000000000000..ef09a890d2b7 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_llc.h @@ -0,0 +1,15 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_LLC_H +#define INTEL_LLC_H + +struct intel_llc; + +void intel_llc_enable(struct intel_llc *llc); +void intel_llc_disable(struct intel_llc *llc); + +#endif /* INTEL_LLC_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_llc_types.h b/drivers/gpu/drm/i915/gt/intel_llc_types.h new file mode 100644 index 000000000000..ecad4687b930 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_llc_types.h @@ -0,0 +1,13 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_LLC_TYPES_H +#define INTEL_LLC_TYPES_H + +struct intel_llc { +}; + +#endif /* INTEL_LLC_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 06a506c29463..0ac3b26674ad 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -145,6 +145,7 @@ #include "intel_lrc_reg.h" #include "intel_mocs.h" #include "intel_reset.h" +#include "intel_ring.h" #include "intel_workarounds.h" #define RING_EXECLIST_QFULL (1 << 0x2) @@ -230,17 +231,42 @@ static int __execlists_context_alloc(struct intel_context *ce, struct intel_engine_cs *engine); static void execlists_init_reg_state(u32 *reg_state, - struct intel_context *ce, - struct intel_engine_cs *engine, - struct intel_ring *ring); + const struct intel_context *ce, + const struct intel_engine_cs *engine, + const struct intel_ring *ring, + bool close); +static void +__execlists_update_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine); static void mark_eio(struct i915_request *rq) { - if (!i915_request_signaled(rq)) - dma_fence_set_error(&rq->fence, -EIO); + if (i915_request_completed(rq)) + return; + + GEM_BUG_ON(i915_request_signaled(rq)); + + dma_fence_set_error(&rq->fence, -EIO); i915_request_mark_complete(rq); } +static struct i915_request * +active_request(const struct intel_timeline * 
const tl, struct i915_request *rq) +{ + struct i915_request *active = rq; + + rcu_read_lock(); + list_for_each_entry_continue_reverse(rq, &tl->requests, link) { + if (i915_request_completed(rq)) + break; + + active = rq; + } + rcu_read_unlock(); + + return active; +} + static inline u32 intel_hws_preempt_address(struct intel_engine_cs *engine) { return (i915_ggtt_offset(engine->status_page.vma) + @@ -337,10 +363,15 @@ static inline bool need_preempt(const struct intel_engine_cs *engine, * However, the priority hint is a mere hint that we may need to * preempt. If that hint is stale or we may be trying to preempt * ourselves, ignore the request. + * + * More naturally we would write + * prio >= max(0, last); + * except that we wish to prevent triggering preemption at the same + * priority level: the task that is running should remain running + * to preserve FIFO ordering of dependencies. */ - last_prio = effective_prio(rq); - if (!i915_scheduler_need_preempt(engine->execlists.queue_priority_hint, - last_prio)) + last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1); + if (engine->execlists.queue_priority_hint <= last_prio) return false; /* @@ -429,12 +460,8 @@ assert_priority_queue(const struct i915_request *prev, static u64 lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) { - struct i915_gem_context *ctx = ce->gem_context; u64 desc; - BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (BIT(GEN8_CTX_ID_WIDTH))); - BUILD_BUG_ON(GEN11_MAX_CONTEXT_HW_ID > (BIT(GEN11_SW_CTX_ID_WIDTH))); - desc = INTEL_LEGACY_32B_CONTEXT; if (i915_vm_is_4lvl(ce->vm)) desc = INTEL_LEGACY_64B_CONTEXT; @@ -444,33 +471,379 @@ lrc_descriptor(struct intel_context *ce, struct intel_engine_cs *engine) if (IS_GEN(engine->i915, 8)) desc |= GEN8_CTX_L3LLC_COHERENT; - desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE; - /* bits 12-31 */ + desc |= i915_ggtt_offset(ce->state); /* bits 12-31 */ /* * The following 32bits are copied into the OA reports (dword 2). * Consider updating oa_get_render_ctx_id in i915_perf.c when changing * anything below. 
*/ if (INTEL_GEN(engine->i915) >= 11) { - GEM_BUG_ON(ctx->hw_id >= BIT(GEN11_SW_CTX_ID_WIDTH)); - desc |= (u64)ctx->hw_id << GEN11_SW_CTX_ID_SHIFT; - /* bits 37-47 */ - desc |= (u64)engine->instance << GEN11_ENGINE_INSTANCE_SHIFT; /* bits 48-53 */ - /* TODO: decide what to do with SW counter (bits 55-60) */ - desc |= (u64)engine->class << GEN11_ENGINE_CLASS_SHIFT; /* bits 61-63 */ - } else { - GEM_BUG_ON(ctx->hw_id >= BIT(GEN8_CTX_ID_WIDTH)); - desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT; /* bits 32-52 */ } return desc; } +static u32 *set_offsets(u32 *regs, + const u8 *data, + const struct intel_engine_cs *engine) +#define NOP(x) (BIT(7) | (x)) +#define LRI(count, flags) ((flags) << 6 | (count)) +#define POSTED BIT(0) +#define REG(x) (((x) >> 2) | BUILD_BUG_ON_ZERO(x >= 0x200)) +#define REG16(x) \ + (((x) >> 9) | BIT(7) | BUILD_BUG_ON_ZERO(x >= 0x10000)), \ + (((x) >> 2) & 0x7f) +#define END() 0 +{ + const u32 base = engine->mmio_base; + + while (*data) { + u8 count, flags; + + if (*data & BIT(7)) { /* skip */ + regs += *data++ & ~BIT(7); + continue; + } + + count = *data & 0x3f; + flags = *data >> 6; + data++; + + *regs = MI_LOAD_REGISTER_IMM(count); + if (flags & POSTED) + *regs |= MI_LRI_FORCE_POSTED; + if (INTEL_GEN(engine->i915) >= 11) + *regs |= MI_LRI_CS_MMIO; + regs++; + + GEM_BUG_ON(!count); + do { + u32 offset = 0; + u8 v; + + do { + v = *data++; + offset <<= 7; + offset |= v & ~BIT(7); + } while (v & BIT(7)); + + *regs = base + (offset << 2); + regs += 2; + } while (--count); + } + + return regs; +} + +static const u8 gen8_xcs_offsets[] = { + NOP(1), + LRI(11, 0), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + + NOP(9), + LRI(9, 0), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + NOP(13), + LRI(2, 0), + REG16(0x200), + REG(0x028), + + END(), +}; + +static const u8 gen9_xcs_offsets[] = { + NOP(1), + LRI(14, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + + NOP(3), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + NOP(13), + LRI(1, POSTED), + REG16(0x200), + + NOP(13), + LRI(44, POSTED), + REG(0x028), + REG(0x09c), + REG(0x0c0), + REG(0x178), + REG(0x17c), + REG16(0x358), + REG(0x170), + REG(0x150), + REG(0x154), + REG(0x158), + REG16(0x41c), + REG16(0x600), + REG16(0x604), + REG16(0x608), + REG16(0x60c), + REG16(0x610), + REG16(0x614), + REG16(0x618), + REG16(0x61c), + REG16(0x620), + REG16(0x624), + REG16(0x628), + REG16(0x62c), + REG16(0x630), + REG16(0x634), + REG16(0x638), + REG16(0x63c), + REG16(0x640), + REG16(0x644), + REG16(0x648), + REG16(0x64c), + REG16(0x650), + REG16(0x654), + REG16(0x658), + REG16(0x65c), + REG16(0x660), + REG16(0x664), + REG16(0x668), + REG16(0x66c), + REG16(0x670), + REG16(0x674), + REG16(0x678), + REG16(0x67c), + REG(0x068), + + END(), +}; + +static const u8 gen12_xcs_offsets[] = { + NOP(1), + LRI(13, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + REG(0x180), + REG16(0x2b4), + + NOP(5), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + 
REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + END(), +}; + +static const u8 gen8_rcs_offsets[] = { + NOP(1), + LRI(14, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + + NOP(3), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + NOP(13), + LRI(1, 0), + REG(0x0c8), + + END(), +}; + +static const u8 gen11_rcs_offsets[] = { + NOP(1), + LRI(15, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x11c), + REG(0x114), + REG(0x118), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + REG(0x180), + + NOP(1), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + LRI(1, POSTED), + REG(0x1b0), + + NOP(10), + LRI(1, 0), + REG(0x0c8), + + END(), +}; + +static const u8 gen12_rcs_offsets[] = { + NOP(1), + LRI(13, POSTED), + REG16(0x244), + REG(0x034), + REG(0x030), + REG(0x038), + REG(0x03c), + REG(0x168), + REG(0x140), + REG(0x110), + REG(0x1c0), + REG(0x1c4), + REG(0x1c8), + REG(0x180), + REG16(0x2b4), + + NOP(5), + LRI(9, POSTED), + REG16(0x3a8), + REG16(0x28c), + REG16(0x288), + REG16(0x284), + REG16(0x280), + REG16(0x27c), + REG16(0x278), + REG16(0x274), + REG16(0x270), + + LRI(3, POSTED), + REG(0x1b0), + REG16(0x5a8), + REG16(0x5ac), + + NOP(6), + LRI(1, 0), + REG(0x0c8), + + END(), +}; + +#undef END +#undef REG16 +#undef REG +#undef LRI +#undef NOP + +static const u8 *reg_offsets(const struct intel_engine_cs *engine) +{ + /* + * The gen12+ lists only have the registers we program in the basic + * default state. We rely on the context image using relative + * addressing to automatic fixup the register state between the + * physical engines for virtual engine. + */ + GEM_BUG_ON(INTEL_GEN(engine->i915) >= 12 && + !intel_engine_has_relative_mmio(engine)); + + if (engine->class == RENDER_CLASS) { + if (INTEL_GEN(engine->i915) >= 12) + return gen12_rcs_offsets; + else if (INTEL_GEN(engine->i915) >= 11) + return gen11_rcs_offsets; + else + return gen8_rcs_offsets; + } else { + if (INTEL_GEN(engine->i915) >= 12) + return gen12_xcs_offsets; + else if (INTEL_GEN(engine->i915) >= 9) + return gen9_xcs_offsets; + else + return gen8_xcs_offsets; + } +} + static void unwind_wa_tail(struct i915_request *rq) { rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES); @@ -489,7 +862,6 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) list_for_each_entry_safe_reverse(rq, rn, &engine->active.requests, sched.link) { - struct intel_engine_cs *owner; if (i915_request_completed(rq)) continue; /* XXX */ @@ -504,8 +876,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) * engine so that it can be moved across onto another physical * engine as load dictates. 
*/ - owner = rq->hw_context->engine; - if (likely(owner == engine)) { + if (likely(rq->execution_mask == engine->mask)) { GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); if (rq_prio(rq) != prio) { prio = rq_prio(rq); @@ -516,6 +887,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) list_move(&rq->sched.link, pl); active = rq; } else { + struct intel_engine_cs *owner = rq->hw_context->engine; + /* * Decouple the virtual breadcrumb before moving it * back to the virtual engine -- we don't want the @@ -525,7 +898,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine) */ if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) { - spin_lock(&rq->lock); + spin_lock_nested(&rq->lock, + SINGLE_DEPTH_NESTING); i915_request_cancel_breadcrumb(rq); spin_unlock(&rq->lock); } @@ -561,6 +935,114 @@ execlists_context_status_change(struct i915_request *rq, unsigned long status) status, rq); } +static void intel_engine_context_in(struct intel_engine_cs *engine) +{ + unsigned long flags; + + if (READ_ONCE(engine->stats.enabled) == 0) + return; + + write_seqlock_irqsave(&engine->stats.lock, flags); + + if (engine->stats.enabled > 0) { + if (engine->stats.active++ == 0) + engine->stats.start = ktime_get(); + GEM_BUG_ON(engine->stats.active == 0); + } + + write_sequnlock_irqrestore(&engine->stats.lock, flags); +} + +static void intel_engine_context_out(struct intel_engine_cs *engine) +{ + unsigned long flags; + + if (READ_ONCE(engine->stats.enabled) == 0) + return; + + write_seqlock_irqsave(&engine->stats.lock, flags); + + if (engine->stats.enabled > 0) { + ktime_t last; + + if (engine->stats.active && --engine->stats.active == 0) { + /* + * Decrement the active context count and in case GPU + * is now idle add up to the running total. + */ + last = ktime_sub(ktime_get(), engine->stats.start); + + engine->stats.total = ktime_add(engine->stats.total, + last); + } else if (engine->stats.active == 0) { + /* + * After turning on engine stats, context out might be + * the first event in which case we account from the + * time stats gathering was turned on. + */ + last = ktime_sub(ktime_get(), engine->stats.enabled_at); + + engine->stats.total = ktime_add(engine->stats.total, + last); + } + } + + write_sequnlock_irqrestore(&engine->stats.lock, flags); +} + +static void restore_default_state(struct intel_context *ce, + struct intel_engine_cs *engine) +{ + u32 *regs = ce->lrc_reg_state; + + if (engine->pinned_default_state) + memcpy(regs, /* skip restoring the vanilla PPHWSP */ + engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, + engine->context_size - PAGE_SIZE); + + execlists_init_reg_state(regs, ce, engine, ce->ring, false); +} + +static void reset_active(struct i915_request *rq, + struct intel_engine_cs *engine) +{ + struct intel_context * const ce = rq->hw_context; + u32 head; + + /* + * The executing context has been cancelled. We want to prevent + * further execution along this context and propagate the error on + * to anything depending on its results. + * + * In __i915_request_submit(), we apply the -EIO and remove the + * requests' payloads for any banned requests. But first, we must + * rewind the context back to the start of the incomplete request so + * that we do not jump back into the middle of the batch. + * + * We preserve the breadcrumbs and semaphores of the incomplete + * requests so that inter-timeline dependencies (i.e other timelines) + * remain correctly ordered. 
And we defer to __i915_request_submit() + * so that all asynchronous waits are correctly handled. + */ + GEM_TRACE("%s(%s): { rq=%llx:%lld }\n", + __func__, engine->name, rq->fence.context, rq->fence.seqno); + + /* On resubmission of the active request, payload will be scrubbed */ + if (i915_request_completed(rq)) + head = rq->tail; + else + head = active_request(ce->timeline, rq)->head; + ce->ring->head = intel_ring_wrap(ce->ring, head); + intel_ring_update_space(ce->ring); + + /* Scrub the context image to prevent replaying the previous batch */ + restore_default_state(ce, engine); + __execlists_update_reg_state(ce, engine); + + /* We've switched away, so this should be a no-op, but intent matters */ + ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; +} + static inline struct intel_engine_cs * __execlists_schedule_in(struct i915_request *rq) { @@ -569,6 +1051,21 @@ __execlists_schedule_in(struct i915_request *rq) intel_context_get(ce); + if (unlikely(i915_gem_context_is_banned(ce->gem_context))) + reset_active(rq, engine); + + if (ce->tag) { + /* Use a fixed tag for OA and friends */ + ce->lrc_desc |= (u64)ce->tag << 32; + } else { + /* We don't need a strict matching tag, just different values */ + ce->lrc_desc &= ~GENMASK_ULL(47, 37); + ce->lrc_desc |= + (u64)(engine->context_tag++ % NUM_CONTEXT_TAG) << + GEN11_SW_CTX_ID_SHIFT; + BUILD_BUG_ON(NUM_CONTEXT_TAG > GEN12_MAX_CONTEXT_HW_ID); + } + intel_gt_pm_get(engine->gt); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); intel_engine_context_in(engine); @@ -612,6 +1109,12 @@ __execlists_schedule_out(struct i915_request *rq, { struct intel_context * const ce = rq->hw_context; + /* + * NB process_csb() is not under the engine->active.lock and hence + * schedule_out can race with schedule_in meaning that we should + * refrain from doing non-trivial work here. 
+ */ + intel_engine_context_out(engine); execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); intel_gt_pm_put(engine->gt); @@ -654,7 +1157,7 @@ static u64 execlists_update_context(const struct i915_request *rq) struct intel_context *ce = rq->hw_context; u64 desc; - ce->lrc_reg_state[CTX_RING_TAIL + 1] = + ce->lrc_reg_state[CTX_RING_TAIL] = intel_ring_set_tail(rq->ring, rq->tail); /* @@ -677,6 +1180,10 @@ static u64 execlists_update_context(const struct i915_request *rq) desc = ce->lrc_desc; ce->lrc_desc &= ~CTX_DESC_FORCE_RESTORE; + /* Wa_1607138340:tgl */ + if (IS_TGL_REVID(rq->i915, TGL_REVID_A0, TGL_REVID_A0)) + desc |= CTX_DESC_FORCE_RESTORE; + return desc; } @@ -699,6 +1206,9 @@ trace_ports(const struct intel_engine_execlists *execlists, const struct intel_engine_cs *engine = container_of(execlists, typeof(*engine), execlists); + if (!ports[0]) + return; + GEM_TRACE("%s: %s { %llx:%lld%s, %llx:%lld }\n", engine->name, msg, ports[0]->fence.context, @@ -719,25 +1229,45 @@ assert_pending_valid(const struct intel_engine_execlists *execlists, trace_ports(execlists, msg, execlists->pending); - if (!execlists->pending[0]) + if (!execlists->pending[0]) { + GEM_TRACE_ERR("Nothing pending for promotion!\n"); return false; + } - if (execlists->pending[execlists_num_ports(execlists)]) + if (execlists->pending[execlists_num_ports(execlists)]) { + GEM_TRACE_ERR("Excess pending[%d] for promotion!\n", + execlists_num_ports(execlists)); return false; + } for (port = execlists->pending; (rq = *port); port++) { - if (ce == rq->hw_context) + if (ce == rq->hw_context) { + GEM_TRACE_ERR("Duplicate context in pending[%zd]\n", + port - execlists->pending); return false; + } ce = rq->hw_context; if (i915_request_completed(rq)) continue; - if (i915_active_is_idle(&ce->active)) + if (i915_active_is_idle(&ce->active)) { + GEM_TRACE_ERR("Inactive context in pending[%zd]\n", + port - execlists->pending); + return false; + } + + if (!i915_vma_is_pinned(ce->state)) { + GEM_TRACE_ERR("Unpinned context in pending[%zd]\n", + port - execlists->pending); return false; + } - if (!i915_vma_is_pinned(ce->state)) + if (!i915_vma_is_pinned(ce->ring->vma)) { + GEM_TRACE_ERR("Unpinned ringbuffer in pending[%zd]\n", + port - execlists->pending); return false; + } } return ce; @@ -814,6 +1344,10 @@ static bool can_merge_rq(const struct i915_request *prev, if (i915_request_completed(next)) return true; + if (unlikely((prev->flags ^ next->flags) & + (I915_REQUEST_NOPREEMPT | I915_REQUEST_SENTINEL))) + return false; + if (!can_merge_ctx(prev->hw_context, next->hw_context)) return false; @@ -823,47 +1357,7 @@ static bool can_merge_rq(const struct i915_request *prev, static void virtual_update_register_offsets(u32 *regs, struct intel_engine_cs *engine) { - u32 base = engine->mmio_base; - - /* Must match execlists_init_reg_state()! 
*/ - - regs[CTX_CONTEXT_CONTROL] = - i915_mmio_reg_offset(RING_CONTEXT_CONTROL(base)); - regs[CTX_RING_HEAD] = i915_mmio_reg_offset(RING_HEAD(base)); - regs[CTX_RING_TAIL] = i915_mmio_reg_offset(RING_TAIL(base)); - regs[CTX_RING_BUFFER_START] = i915_mmio_reg_offset(RING_START(base)); - regs[CTX_RING_BUFFER_CONTROL] = i915_mmio_reg_offset(RING_CTL(base)); - - regs[CTX_BB_HEAD_U] = i915_mmio_reg_offset(RING_BBADDR_UDW(base)); - regs[CTX_BB_HEAD_L] = i915_mmio_reg_offset(RING_BBADDR(base)); - regs[CTX_BB_STATE] = i915_mmio_reg_offset(RING_BBSTATE(base)); - regs[CTX_SECOND_BB_HEAD_U] = - i915_mmio_reg_offset(RING_SBBADDR_UDW(base)); - regs[CTX_SECOND_BB_HEAD_L] = i915_mmio_reg_offset(RING_SBBADDR(base)); - regs[CTX_SECOND_BB_STATE] = i915_mmio_reg_offset(RING_SBBSTATE(base)); - - regs[CTX_CTX_TIMESTAMP] = - i915_mmio_reg_offset(RING_CTX_TIMESTAMP(base)); - regs[CTX_PDP3_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 3)); - regs[CTX_PDP3_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 3)); - regs[CTX_PDP2_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 2)); - regs[CTX_PDP2_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 2)); - regs[CTX_PDP1_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 1)); - regs[CTX_PDP1_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 1)); - regs[CTX_PDP0_UDW] = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, 0)); - regs[CTX_PDP0_LDW] = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, 0)); - - if (engine->class == RENDER_CLASS) { - regs[CTX_RCS_INDIRECT_CTX] = - i915_mmio_reg_offset(RING_INDIRECT_CTX(base)); - regs[CTX_RCS_INDIRECT_CTX_OFFSET] = - i915_mmio_reg_offset(RING_INDIRECT_CTX_OFFSET(base)); - regs[CTX_BB_PER_CTX_PTR] = - i915_mmio_reg_offset(RING_BB_PER_CTX_PTR(base)); - - regs[CTX_R_PWR_CLK_STATE] = - i915_mmio_reg_offset(GEN8_R_PWR_CLK_STATE); - } + set_offsets(regs, reg_offsets(engine), engine); } static bool virtual_matches(const struct virtual_engine *ve, @@ -978,7 +1472,7 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq) { int hint; - if (!intel_engine_has_semaphores(engine)) + if (!intel_engine_has_timeslices(engine)) return false; if (list_is_last(&rq->sched.link, &engine->active.requests)) @@ -999,15 +1493,32 @@ switch_prio(struct intel_engine_cs *engine, const struct i915_request *rq) return rq_prio(list_next_entry(rq, sched.link)); } -static bool -enable_timeslice(const struct intel_engine_execlists *execlists) +static inline unsigned long +timeslice(const struct intel_engine_cs *engine) +{ + return READ_ONCE(engine->props.timeslice_duration_ms); +} + +static unsigned long +active_timeslice(const struct intel_engine_cs *engine) { - const struct i915_request *rq = *execlists->active; + const struct i915_request *rq = *engine->execlists.active; if (i915_request_completed(rq)) - return false; + return 0; + + if (engine->execlists.switch_priority_hint < effective_prio(rq)) + return 0; + + return timeslice(engine); +} + +static void set_timeslice(struct intel_engine_cs *engine) +{ + if (!intel_engine_has_timeslices(engine)) + return; - return execlists->switch_priority_hint >= effective_prio(rq); + set_timer_ms(&engine->execlists.timer, active_timeslice(engine)); } static void record_preemption(struct intel_engine_execlists *execlists) @@ -1015,6 +1526,30 @@ static void record_preemption(struct intel_engine_execlists *execlists) (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); } +static unsigned long active_preempt_timeout(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = 
last_active(&engine->execlists); + if (!rq) + return 0; + + /* Force a fast reset for terminated contexts (ignoring sysfs!) */ + if (unlikely(i915_gem_context_is_banned(rq->gem_context))) + return 1; + + return READ_ONCE(engine->props.preempt_timeout_ms); +} + +static void set_preempt_timeout(struct intel_engine_cs *engine) +{ + if (!intel_engine_has_preempt_reset(engine)) + return; + + set_timer_ms(&engine->execlists.preempt, + active_preempt_timeout(engine)); +} + static void execlists_dequeue(struct intel_engine_cs *engine) { struct intel_engine_execlists * const execlists = &engine->execlists; @@ -1111,7 +1646,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine) last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE; last = NULL; } else if (need_timeslice(engine, last) && - !timer_pending(&engine->execlists.timer)) { + timer_expired(&engine->execlists.timer)) { GEM_TRACE("%s: expired last=%llx:%lld, prio=%d, hint=%d\n", engine->name, last->fence.context, @@ -1147,8 +1682,18 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * submission. */ if (!list_is_last(&last->sched.link, - &engine->active.requests)) + &engine->active.requests)) { + /* + * Even if ELSP[1] is occupied and not worthy + * of timeslices, our queue might be. + */ + if (!execlists->timer.expires && + need_timeslice(engine, last)) + set_timer_ms(&execlists->timer, + timeslice(engine)); + return; + } /* * WaIdleLiteRestore:bdw,skl @@ -1216,7 +1761,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine) unsigned int n; GEM_BUG_ON(READ_ONCE(ve->context.inflight)); - virtual_update_register_offsets(regs, engine); + + if (!intel_engine_has_relative_mmio(engine)) + virtual_update_register_offsets(regs, + engine); if (!list_empty(&ve->context.signals)) virtual_xfer_breadcrumbs(ve, engine); @@ -1299,6 +1847,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine) if (last->hw_context == rq->hw_context) goto done; + if (i915_request_has_sentinel(last)) + goto done; + /* * If GVT overrides us we only ever submit * port[0], leaving port[1] empty. Note that we @@ -1357,11 +1908,28 @@ done: if (submit) { *port = execlists_schedule_in(last, port - execlists->pending); - memset(port + 1, 0, (last_port - port) * sizeof(*port)); execlists->switch_priority_hint = switch_prio(engine, *execlists->pending); + + /* + * Skip if we ended up with exactly the same set of requests, + * e.g. 
trying to timeslice a pair of ordered contexts + */ + if (!memcmp(execlists->active, execlists->pending, + (port - execlists->pending + 1) * sizeof(*port))) { + do + execlists_schedule_out(fetch_and_zero(port)); + while (port-- != execlists->pending); + + goto skip_submit; + } + + memset(port + 1, 0, (last_port - port) * sizeof(*port)); execlists_submit_ports(engine); + + set_preempt_timeout(engine); } else { +skip_submit: ring_set_paused(engine, 0); } } @@ -1394,13 +1962,6 @@ reset_in_progress(const struct intel_engine_execlists *execlists) return unlikely(!__tasklet_is_enabled(&execlists->tasklet)); } -enum csb_step { - CSB_NOP, - CSB_PROMOTE, - CSB_PREEMPT, - CSB_COMPLETE, -}; - /* * Starting with Gen12, the status has a new format: * @@ -1427,7 +1988,7 @@ enum csb_step { * bits 47-57: sw context id of the lrc the GT switched away from * bits 58-63: sw counter of the lrc the GT switched away from */ -static inline enum csb_step +static inline bool gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) { u32 lower_dw = csb[0]; @@ -1436,9 +1997,6 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) bool ctx_away_valid = GEN12_CSB_CTX_VALID(upper_dw); bool new_queue = lower_dw & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE; - if (!ctx_away_valid && ctx_to_valid) - return CSB_PROMOTE; - /* * The context switch detail is not guaranteed to be 5 when a preemption * occurs, so we can't just check for that. The check below works for @@ -1446,8 +2004,10 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) * instructions and lite-restore. Preempt-to-idle via the CTRL register * would require some extra handling, but we don't support that. */ - if (new_queue && ctx_away_valid) - return CSB_PREEMPT; + if (!ctx_away_valid || new_queue) { + GEM_BUG_ON(!ctx_to_valid); + return true; + } /* * switch detail = 5 is covered by the case above and we do not expect a @@ -1455,30 +2015,13 @@ gen12_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) * use polling mode. */ GEM_BUG_ON(GEN12_CTX_SWITCH_DETAIL(upper_dw)); - - if (*execlists->active) { - GEM_BUG_ON(!ctx_away_valid); - return CSB_COMPLETE; - } - - return CSB_NOP; + return false; } -static inline enum csb_step +static inline bool gen8_csb_parse(const struct intel_engine_execlists *execlists, const u32 *csb) { - unsigned int status = *csb; - - if (status & GEN8_CTX_STATUS_IDLE_ACTIVE) - return CSB_PROMOTE; - - if (status & GEN8_CTX_STATUS_PREEMPTED) - return CSB_PREEMPT; - - if (*execlists->active) - return CSB_COMPLETE; - - return CSB_NOP; + return *csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); } static void process_csb(struct intel_engine_cs *engine) @@ -1488,7 +2031,14 @@ static void process_csb(struct intel_engine_cs *engine) const u8 num_entries = execlists->csb_size; u8 head, tail; - GEM_BUG_ON(USES_GUC_SUBMISSION(engine->i915)); + /* + * As we modify our execlists state tracking we require exclusive + * access. Either we are inside the tasklet, or the tasklet is disabled + * and we assume that is only inside the reset paths and so serialised. + */ + GEM_BUG_ON(!tasklet_is_locked(&execlists->tasklet) && + !reset_in_progress(execlists)); + GEM_BUG_ON(!intel_engine_in_execlists_submission_mode(engine)); /* * Note that csb_write, csb_status may be either in HWSP or mmio. 
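
A note on the CSB rework above: gen8_csb_parse() and gen12_csb_parse() collapse the old four-state csb_step enum into a single boolean, "did this event promote a new submission?", and the hunk that follows applies that flag inside process_csb(), either swapping the pending[] ports into the inflight[] array or advancing the active cursor past a completed port. The standalone sketch below models that promote/complete consumption loop with deliberately simplified types; csb_model, consume_csb(), the integer request ids and MAX_PORTS are illustrative placeholders, not driver structures, and real CSB entries are hardware status dwords rather than booleans.

/* Standalone model of the promote/complete CSB handling described above.
 * All names and types here are illustrative only. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define MAX_PORTS	2
#define CSB_ENTRIES	6

struct csb_model {
	int inflight[MAX_PORTS + 1];	/* what the HW is running; 0-terminated */
	int pending[MAX_PORTS + 1];	/* what was last written to the ELSP */
	int *active;			/* cursor into inflight[] */
	bool csb[CSB_ENTRIES];		/* true = promotion, false = completion */
	unsigned int head, tail;
};

static void consume_csb(struct csb_model *m)
{
	unsigned int head = m->head;

	while (head != m->tail) {
		bool promote = m->csb[head];

		if (++head == CSB_ENTRIES)
			head = 0;	/* ring-buffer wrap */

		if (promote) {
			/* switch pending to inflight, dropping the old set */
			m->active = memcpy(m->inflight, m->pending,
					   sizeof(m->pending));
			m->pending[0] = 0;
		} else {
			/* port0 completed, advance to port1 */
			m->active++;
		}
	}

	m->head = head;
}

int main(void)
{
	struct csb_model m = {
		.pending = { 101, 102 },
		.csb = { true, false },	/* promote both ports, complete port0 */
		.tail = 2,
	};

	consume_csb(&m);
	printf("active request: %d\n", *m.active);	/* prints 102 */
	return 0;
}

Collapsing the enum to a flag works because the old CSB_NOP/CSB_COMPLETE distinction reduces to whether anything is still active, which the not-promoted branch now simply asserts (GEM_BUG_ON(!*execlists->active)) before treating the event as a port0 completion.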
@@ -1517,7 +2067,7 @@ static void process_csb(struct intel_engine_cs *engine) rmb(); do { - enum csb_step csb_step; + bool promote; if (++head == num_entries) head = 0; @@ -1545,20 +2095,19 @@ static void process_csb(struct intel_engine_cs *engine) buf[2 * head + 0], buf[2 * head + 1]); if (INTEL_GEN(engine->i915) >= 12) - csb_step = gen12_csb_parse(execlists, buf + 2 * head); + promote = gen12_csb_parse(execlists, buf + 2 * head); else - csb_step = gen8_csb_parse(execlists, buf + 2 * head); + promote = gen8_csb_parse(execlists, buf + 2 * head); + if (promote) { + if (!inject_preempt_hang(execlists)) + ring_set_paused(engine, 0); - switch (csb_step) { - case CSB_PREEMPT: /* cancel old inflight, prepare for switch */ + /* cancel old inflight, prepare for switch */ trace_ports(execlists, "preempted", execlists->active); - while (*execlists->active) execlists_schedule_out(*execlists->active++); - /* fallthrough */ - case CSB_PROMOTE: /* switch pending to inflight */ - GEM_BUG_ON(*execlists->active); + /* switch pending to inflight */ GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); execlists->active = memcpy(execlists->inflight, @@ -1566,16 +2115,13 @@ static void process_csb(struct intel_engine_cs *engine) execlists_num_ports(execlists) * sizeof(*execlists->pending)); - if (enable_timeslice(execlists)) - mod_timer(&execlists->timer, jiffies + 1); - - if (!inject_preempt_hang(execlists)) - ring_set_paused(engine, 0); + set_timeslice(engine); WRITE_ONCE(execlists->pending[0], NULL); - break; + } else { + GEM_BUG_ON(!*execlists->active); - case CSB_COMPLETE: /* port0 completed, advanced to port1 */ + /* port0 completed, advanced to port1 */ trace_ports(execlists, "completed", execlists->active); /* @@ -1590,10 +2136,6 @@ static void process_csb(struct intel_engine_cs *engine) GEM_BUG_ON(execlists->active - execlists->inflight > execlists_num_ports(execlists)); - break; - - case CSB_NOP: - break; } } while (head != tail); @@ -1623,6 +2165,43 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) } } +static noinline void preempt_reset(struct intel_engine_cs *engine) +{ + const unsigned int bit = I915_RESET_ENGINE + engine->id; + unsigned long *lock = &engine->gt->reset.flags; + + if (i915_modparams.reset < 3) + return; + + if (test_and_set_bit(bit, lock)) + return; + + /* Mark this tasklet as disabled to avoid waiting for it to complete */ + tasklet_disable_nosync(&engine->execlists.tasklet); + + GEM_TRACE("%s: preempt timeout %lu+%ums\n", + engine->name, + READ_ONCE(engine->props.preempt_timeout_ms), + jiffies_to_msecs(jiffies - engine->execlists.preempt.expires)); + intel_engine_reset(engine, "preemption time out"); + + tasklet_enable(&engine->execlists.tasklet); + clear_and_wake_up_bit(bit, lock); +} + +static bool preempt_timeout(const struct intel_engine_cs *const engine) +{ + const struct timer_list *t = &engine->execlists.preempt; + + if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) + return false; + + if (!timer_expired(t)) + return false; + + return READ_ONCE(engine->execlists.pending[0]); +} + /* * Check the unread Context Status Buffers and manage the submission of new * contexts to the ELSP accordingly. 
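
The preempt_reset()/preempt_timeout() pair above, together with the set_preempt_timeout() call added after execlists_submit_ports(), implements an arm/check/escalate pattern: arm a deadline (engine->props.preempt_timeout_ms, or a forced fast timeout for banned contexts) whenever a new submission is written to the ELSP, and if the hardware has not acknowledged it — execlists.pending[0] still set — by the time it expires, reset the engine from the submission tasklet, as the next hunk shows. Below is a minimal userspace sketch of the same pattern under stated assumptions: engine_model, ms_from_now() and reset_engine() are stand-ins for the execlists.preempt timer, the jiffies-based helpers and intel_engine_reset(), and are not driver symbols.

/* Minimal model of the preempt-timeout pattern: arm a deadline when a
 * submission is made, escalate to a reset only if it is still pending
 * once the deadline expires. Illustrative stand-ins, not driver code. */
#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct engine_model {
	struct timespec preempt_deadline;
	bool preempt_armed;
	bool preempt_pending;		/* stands in for execlists.pending[0] */
	unsigned int preempt_timeout_ms;
};

static struct timespec ms_from_now(unsigned int ms)
{
	struct timespec t;

	clock_gettime(CLOCK_MONOTONIC, &t);
	t.tv_sec += ms / 1000;
	t.tv_nsec += (long)(ms % 1000) * 1000000L;
	if (t.tv_nsec >= 1000000000L) {
		t.tv_sec++;
		t.tv_nsec -= 1000000000L;
	}
	return t;
}

static bool deadline_expired(const struct timespec *deadline)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return now.tv_sec > deadline->tv_sec ||
	       (now.tv_sec == deadline->tv_sec &&
		now.tv_nsec >= deadline->tv_nsec);
}

/* Called when a new submission is written to the hardware. */
static void set_preempt_timeout(struct engine_model *e)
{
	e->preempt_deadline = ms_from_now(e->preempt_timeout_ms);
	e->preempt_armed = true;
	e->preempt_pending = true;
}

/* Did the submission time out without being acknowledged? */
static bool preempt_timeout(const struct engine_model *e)
{
	if (!e->preempt_armed || !deadline_expired(&e->preempt_deadline))
		return false;

	return e->preempt_pending;
}

static void reset_engine(struct engine_model *e)
{
	printf("preemption timed out after %ums, resetting engine\n",
	       e->preempt_timeout_ms);
	e->preempt_armed = false;
	e->preempt_pending = false;
}

int main(void)
{
	struct engine_model e = { .preempt_timeout_ms = 1 };
	struct timespec wait = { .tv_nsec = 2 * 1000000L };	/* 2 ms */

	set_preempt_timeout(&e);
	nanosleep(&wait, NULL);	/* hardware never acknowledges */

	if (preempt_timeout(&e))
		reset_engine(&e);
	return 0;
}

In the driver both this preempt timer and the new timeslice timer only kick the tasklet (execlists_preempt()/execlists_timeslice() via __execlists_kick()); the reset decision itself is re-checked by preempt_timeout() after the tasklet has serialised with any direct submission.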
@@ -1630,23 +2209,39 @@ static void __execlists_submission_tasklet(struct intel_engine_cs *const engine) static void execlists_submission_tasklet(unsigned long data) { struct intel_engine_cs * const engine = (struct intel_engine_cs *)data; - unsigned long flags; + bool timeout = preempt_timeout(engine); process_csb(engine); - if (!READ_ONCE(engine->execlists.pending[0])) { + if (!READ_ONCE(engine->execlists.pending[0]) || timeout) { + unsigned long flags; + spin_lock_irqsave(&engine->active.lock, flags); __execlists_submission_tasklet(engine); spin_unlock_irqrestore(&engine->active.lock, flags); + + /* Recheck after serialising with direct-submission */ + if (timeout && preempt_timeout(engine)) + preempt_reset(engine); } } -static void execlists_submission_timer(struct timer_list *timer) +static void __execlists_kick(struct intel_engine_execlists *execlists) { - struct intel_engine_cs *engine = - from_timer(engine, timer, execlists.timer); - /* Kick the tasklet for some interrupt coalescing and reset handling */ - tasklet_hi_schedule(&engine->execlists.tasklet); + tasklet_hi_schedule(&execlists->tasklet); +} + +#define execlists_kick(t, member) \ + __execlists_kick(container_of(t, struct intel_engine_execlists, member)) + +static void execlists_timeslice(struct timer_list *timer) +{ + execlists_kick(timer, timer); +} + +static void execlists_preempt(struct timer_list *timer) +{ + execlists_kick(timer, preempt); } static void queue_request(struct intel_engine_cs *engine, @@ -1726,7 +2321,6 @@ set_redzone(void *vaddr, const struct intel_engine_cs *engine) if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) return; - vaddr += LRC_HEADER_PAGES * PAGE_SIZE; vaddr += engine->context_size; memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE); @@ -1738,7 +2332,6 @@ check_redzone(const void *vaddr, const struct intel_engine_cs *engine) if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) return; - vaddr += LRC_HEADER_PAGES * PAGE_SIZE; vaddr += engine->context_size; if (memchr_inv(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE)) @@ -1752,14 +2345,13 @@ static void execlists_context_unpin(struct intel_context *ce) check_redzone((void *)ce->lrc_reg_state - LRC_STATE_PN * PAGE_SIZE, ce->engine); - i915_gem_context_unpin_hw_id(ce->gem_context); i915_gem_object_unpin_map(ce->state->obj); intel_ring_reset(ce->ring, ce->ring->tail); } static void -__execlists_update_reg_state(struct intel_context *ce, - struct intel_engine_cs *engine) +__execlists_update_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine) { struct intel_ring *ring = ce->ring; u32 *regs = ce->lrc_reg_state; @@ -1767,16 +2359,16 @@ __execlists_update_reg_state(struct intel_context *ce, GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head)); GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail)); - regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(ring->vma); - regs[CTX_RING_HEAD + 1] = ring->head; - regs[CTX_RING_TAIL + 1] = ring->tail; + regs[CTX_RING_BUFFER_START] = i915_ggtt_offset(ring->vma); + regs[CTX_RING_HEAD] = ring->head; + regs[CTX_RING_TAIL] = ring->tail; /* RPCS */ if (engine->class == RENDER_CLASS) { - regs[CTX_R_PWR_CLK_STATE + 1] = + regs[CTX_R_PWR_CLK_STATE] = intel_sseu_make_rpcs(engine->i915, &ce->sseu); - i915_oa_init_reg_state(engine, ce, regs); + i915_oa_init_reg_state(ce, engine); } } @@ -1802,18 +2394,12 @@ __execlists_context_pin(struct intel_context *ce, goto unpin_active; } - ret = i915_gem_context_pin_hw_id(ce->gem_context); - if (ret) - goto unpin_map; - ce->lrc_desc = lrc_descriptor(ce, engine); 
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE; __execlists_update_reg_state(ce, engine); return 0; -unpin_map: - i915_gem_object_unpin_map(ce->state->obj); unpin_active: intel_context_active_release(ce); err: @@ -1869,7 +2455,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) { u32 *cs; - GEM_BUG_ON(!rq->timeline->has_initial_breadcrumb); + GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb); cs = intel_ring_begin(rq, 6); if (IS_ERR(cs)) @@ -1885,7 +2471,7 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) *cs++ = MI_NOOP; *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = rq->timeline->hwsp_offset; + *cs++ = i915_request_timeline(rq)->hwsp_offset; *cs++ = 0; *cs++ = rq->fence.seqno - 1; @@ -1897,60 +2483,6 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq) return 0; } -static int emit_pdps(struct i915_request *rq) -{ - const struct intel_engine_cs * const engine = rq->engine; - struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->hw_context->vm); - int err, i; - u32 *cs; - - GEM_BUG_ON(intel_vgpu_active(rq->i915)); - - /* - * Beware ye of the dragons, this sequence is magic! - * - * Small changes to this sequence can cause anything from - * GPU hangs to forcewake errors and machine lockups! - */ - - /* Flush any residual operations from the context load */ - err = engine->emit_flush(rq, EMIT_FLUSH); - if (err) - return err; - - /* Magic required to prevent forcewake errors! */ - err = engine->emit_flush(rq, EMIT_INVALIDATE); - if (err) - return err; - - cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - /* Ensure the LRI have landed before we invalidate & continue */ - *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; - for (i = GEN8_3LVL_PDPES; i--; ) { - const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); - u32 base = engine->mmio_base; - - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); - *cs++ = upper_32_bits(pd_daddr); - *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); - *cs++ = lower_32_bits(pd_daddr); - } - *cs++ = MI_NOOP; - - intel_ring_advance(rq, cs); - - /* Be doubly sure the LRI have landed before proceeding */ - err = engine->emit_flush(rq, EMIT_FLUSH); - if (err) - return err; - - /* Re-invalidate the TLB for luck */ - return engine->emit_flush(rq, EMIT_INVALIDATE); -} - static int execlists_request_alloc(struct i915_request *request) { int ret; @@ -1973,10 +2505,7 @@ static int execlists_request_alloc(struct i915_request *request) */ /* Unconditionally invalidate GPU caches and TLBs. 
*/ - if (i915_vm_is_4lvl(request->hw_context->vm)) - ret = request->engine->emit_flush(request, EMIT_INVALIDATE); - else - ret = emit_pdps(request); + ret = request->engine->emit_flush(request, EMIT_INVALIDATE); if (ret) return ret; @@ -2028,12 +2557,6 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch) return batch; } -static u32 slm_offset(struct intel_engine_cs *engine) -{ - return intel_gt_scratch_offset(engine->gt, - INTEL_GT_SCRATCH_FIELD_CLEAR_SLM_WA); -} - /* * Typically we only have one indirect_ctx and per_ctx batch buffer which are * initialized at the beginning and shared across all contexts but this field @@ -2062,10 +2585,10 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch) /* Actual scratch location is at 128 bytes offset */ batch = gen8_emit_pipe_control(batch, PIPE_CONTROL_FLUSH_L3 | - PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_STORE_DATA_INDEX | PIPE_CONTROL_CS_STALL | PIPE_CONTROL_QW_WRITE, - slm_offset(engine)); + LRC_PPHWSP_SCRATCH_ADDR); *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; @@ -2423,27 +2946,29 @@ static void reset_csb_pointers(struct intel_engine_cs *engine) &execlists->csb_status[reset_value]); } -static struct i915_request *active_request(struct i915_request *rq) +static int lrc_ring_mi_mode(const struct intel_engine_cs *engine) { - const struct intel_context * const ce = rq->hw_context; - struct i915_request *active = NULL; - struct list_head *list; - - if (!i915_request_is_active(rq)) /* unwound, but incomplete! */ - return rq; - - list = &rq->timeline->requests; - list_for_each_entry_from_reverse(rq, list, link) { - if (i915_request_completed(rq)) - break; + if (INTEL_GEN(engine->i915) >= 12) + return 0x60; + else if (INTEL_GEN(engine->i915) >= 9) + return 0x54; + else if (engine->class == RENDER_CLASS) + return 0x58; + else + return -1; +} - if (rq->hw_context != ce) - break; +static void __execlists_reset_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine) +{ + u32 *regs = ce->lrc_reg_state; + int x; - active = rq; + x = lrc_ring_mi_mode(engine); + if (x != -1) { + regs[x + 1] &= ~STOP_RING; + regs[x + 1] |= STOP_RING << 16; } - - return active; } static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) @@ -2451,7 +2976,10 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) struct intel_engine_execlists * const execlists = &engine->execlists; struct intel_context *ce; struct i915_request *rq; - u32 *regs; + + mb(); /* paranoia: read the CSB pointers from after the reset */ + clflush(execlists->csb_write); + mb(); process_csb(engine); /* drain preemption events */ @@ -2467,16 +2995,23 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) if (!rq) goto unwind; + /* We still have requests in-flight; the engine should be active */ + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); + ce = rq->hw_context; - GEM_BUG_ON(i915_active_is_idle(&ce->active)); GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); - rq = active_request(rq); - if (!rq) { - ce->ring->head = ce->ring->tail; + + if (i915_request_completed(rq)) { + /* Idle context; tidy up the ring so we can restart afresh */ + ce->ring->head = intel_ring_wrap(ce->ring, rq->tail); goto out_replay; } + /* Context has requests still in-flight; it should not be idle! 
*/ + GEM_BUG_ON(i915_active_is_idle(&ce->active)); + rq = active_request(ce->timeline, rq); ce->ring->head = intel_ring_wrap(ce->ring, rq->head); + GEM_BUG_ON(ce->ring->head == ce->ring->tail); /* * If this request hasn't started yet, e.g. it is waiting on a @@ -2516,19 +3051,16 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled) * future request will be after userspace has had the opportunity * to recreate its own state. */ - regs = ce->lrc_reg_state; - if (engine->pinned_default_state) { - memcpy(regs, /* skip restoring the vanilla PPHWSP */ - engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, - engine->context_size - PAGE_SIZE); - } - execlists_init_reg_state(regs, ce, engine, ce->ring); + GEM_BUG_ON(!intel_context_is_pinned(ce)); + restore_default_state(ce, engine); out_replay: - GEM_TRACE("%s replay {head:%04x, tail:%04x\n", + GEM_TRACE("%s replay {head:%04x, tail:%04x}\n", engine->name, ce->ring->head, ce->ring->tail); intel_ring_update_space(ce->ring); + __execlists_reset_reg_state(ce, engine); __execlists_update_reg_state(ce, engine); + ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */ unwind: /* Push back any incomplete requests for replay after the reset. */ @@ -2749,7 +3281,7 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode) } *cs++ = cmd; - *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT; + *cs++ = LRC_PPHWSP_SCRATCH_ADDR; *cs++ = 0; /* upper addr */ *cs++ = 0; /* value */ intel_ring_advance(request, cs); @@ -2760,10 +3292,6 @@ static int gen8_emit_flush(struct i915_request *request, u32 mode) static int gen8_emit_flush_render(struct i915_request *request, u32 mode) { - struct intel_engine_cs *engine = request->engine; - u32 scratch_addr = - intel_gt_scratch_offset(engine->gt, - INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); bool vf_flush_wa = false, dc_flush_wa = false; u32 *cs, flags = 0; int len; @@ -2785,7 +3313,7 @@ static int gen8_emit_flush_render(struct i915_request *request, flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; flags |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; /* * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL @@ -2818,7 +3346,7 @@ static int gen8_emit_flush_render(struct i915_request *request, cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE, 0); - cs = gen8_emit_pipe_control(cs, flags, scratch_addr); + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); if (dc_flush_wa) cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0); @@ -2831,11 +3359,6 @@ static int gen8_emit_flush_render(struct i915_request *request, static int gen11_emit_flush_render(struct i915_request *request, u32 mode) { - struct intel_engine_cs *engine = request->engine; - const u32 scratch_addr = - intel_gt_scratch_offset(engine->gt, - INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH); - if (mode & EMIT_FLUSH) { u32 *cs; u32 flags = 0; @@ -2848,13 +3371,13 @@ static int gen11_emit_flush_render(struct i915_request *request, flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; flags |= PIPE_CONTROL_FLUSH_ENABLE; flags |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; cs = intel_ring_begin(request, 6); if (IS_ERR(cs)) return PTR_ERR(cs); - cs = gen8_emit_pipe_control(cs, flags, scratch_addr); + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); intel_ring_advance(request, cs); } @@ -2872,14 +3395,106 @@ static int 
gen11_emit_flush_render(struct i915_request *request, flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; flags |= PIPE_CONTROL_QW_WRITE; - flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + + cs = intel_ring_begin(request, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); + intel_ring_advance(request, cs); + } + + return 0; +} + +static u32 preparser_disable(bool state) +{ + return MI_ARB_CHECK | 1 << 8 | state; +} + +static int gen12_emit_flush_render(struct i915_request *request, + u32 mode) +{ + if (mode & EMIT_FLUSH) { + u32 flags = 0; + u32 *cs; + + flags |= PIPE_CONTROL_TILE_CACHE_FLUSH; + flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; + flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; + /* Wa_1409600907:tgl */ + flags |= PIPE_CONTROL_DEPTH_STALL; + flags |= PIPE_CONTROL_DC_FLUSH_ENABLE; + flags |= PIPE_CONTROL_FLUSH_ENABLE; + flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH; + + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + flags |= PIPE_CONTROL_QW_WRITE; + + flags |= PIPE_CONTROL_CS_STALL; cs = intel_ring_begin(request, 6); if (IS_ERR(cs)) return PTR_ERR(cs); - cs = gen8_emit_pipe_control(cs, flags, scratch_addr); + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); + intel_ring_advance(request, cs); + } + + if (mode & EMIT_INVALIDATE) { + u32 flags = 0; + u32 *cs; + + flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TLB_INVALIDATE; + flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; + flags |= PIPE_CONTROL_L3_RO_CACHE_INVALIDATE; + + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + flags |= PIPE_CONTROL_QW_WRITE; + + flags |= PIPE_CONTROL_CS_STALL; + + cs = intel_ring_begin(request, 8); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + /* + * Prevent the pre-parser from skipping past the TLB + * invalidate and loading a stale page for the batch + * buffer / request payload. 
+ */ + *cs++ = preparser_disable(true); + + cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR); + + *cs++ = preparser_disable(false); intel_ring_advance(request, cs); + + /* + * Wa_1604544889:tgl + */ + if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) { + flags = 0; + flags |= PIPE_CONTROL_CS_STALL; + flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH; + + flags |= PIPE_CONTROL_STORE_DATA_INDEX; + flags |= PIPE_CONTROL_QW_WRITE; + + cs = intel_ring_begin(request, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + cs = gen8_emit_pipe_control(cs, flags, + LRC_PPHWSP_SCRATCH_ADDR); + intel_ring_advance(request, cs); + } } return 0; @@ -2933,7 +3548,7 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) { cs = gen8_emit_ggtt_write(cs, request->fence.seqno, - request->timeline->hwsp_offset, + i915_request_active_timeline(request)->hwsp_offset, 0); return gen8_emit_fini_breadcrumb_footer(request, cs); @@ -2941,28 +3556,28 @@ static u32 *gen8_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) static u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) { - cs = gen8_emit_ggtt_write_rcs(cs, - request->fence.seqno, - request->timeline->hwsp_offset, - PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | - PIPE_CONTROL_DEPTH_CACHE_FLUSH | - PIPE_CONTROL_DC_FLUSH_ENABLE); - - /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ cs = gen8_emit_pipe_control(cs, - PIPE_CONTROL_FLUSH_ENABLE | - PIPE_CONTROL_CS_STALL, + PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + PIPE_CONTROL_DC_FLUSH_ENABLE, 0); + /* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */ + cs = gen8_emit_ggtt_write_rcs(cs, + request->fence.seqno, + i915_request_active_timeline(request)->hwsp_offset, + PIPE_CONTROL_FLUSH_ENABLE | + PIPE_CONTROL_CS_STALL); + return gen8_emit_fini_breadcrumb_footer(request, cs); } -static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, - u32 *cs) +static u32 * +gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) { cs = gen8_emit_ggtt_write_rcs(cs, request->fence.seqno, - request->timeline->hwsp_offset, + i915_request_active_timeline(request)->hwsp_offset, PIPE_CONTROL_CS_STALL | PIPE_CONTROL_TILE_CACHE_FLUSH | PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | @@ -2973,9 +3588,88 @@ static u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *request, return gen8_emit_fini_breadcrumb_footer(request, cs); } +/* + * Note that the CS instruction pre-parser will not stall on the breadcrumb + * flush and will continue pre-fetching the instructions after it before the + * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at + * BB_START/END instructions, so, even though we might pre-fetch the pre-amble + * of the next request before the memory has been flushed, we're guaranteed that + * we won't access the batch itself too early. + * However, on gen12+ the parser can pre-fetch across the BB_START/END commands, + * so, if the current request is modifying an instruction in the next request on + * the same intel_context, we might pre-fetch and then execute the pre-update + * instruction. To avoid this, the users of self-modifying code should either + * disable the parser around the code emitting the memory writes, via a new flag + * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For + * the in-kernel use-cases we've opted to use a separate context, see + * reloc_gpu() as an example. 
+ * All the above applies only to the instructions themselves. Non-inline data + * used by the instructions is not pre-fetched. + */ + +static u32 *gen12_emit_preempt_busywait(struct i915_request *request, u32 *cs) +{ + *cs++ = MI_SEMAPHORE_WAIT_TOKEN | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_EQ_SDD; + *cs++ = 0; + *cs++ = intel_hws_preempt_address(request->engine); + *cs++ = 0; + *cs++ = 0; + *cs++ = MI_NOOP; + + return cs; +} + +static __always_inline u32* +gen12_emit_fini_breadcrumb_footer(struct i915_request *request, u32 *cs) +{ + *cs++ = MI_USER_INTERRUPT; + + *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; + if (intel_engine_has_semaphores(request->engine)) + cs = gen12_emit_preempt_busywait(request, cs); + + request->tail = intel_ring_offset(request, cs); + assert_ring_tail_valid(request->ring, request->tail); + + return gen8_emit_wa_tail(request, cs); +} + +static u32 *gen12_emit_fini_breadcrumb(struct i915_request *request, u32 *cs) +{ + cs = gen8_emit_ggtt_write(cs, + request->fence.seqno, + i915_request_active_timeline(request)->hwsp_offset, + 0); + + return gen12_emit_fini_breadcrumb_footer(request, cs); +} + +static u32 * +gen12_emit_fini_breadcrumb_rcs(struct i915_request *request, u32 *cs) +{ + cs = gen8_emit_ggtt_write_rcs(cs, + request->fence.seqno, + i915_request_active_timeline(request)->hwsp_offset, + PIPE_CONTROL_CS_STALL | + PIPE_CONTROL_TILE_CACHE_FLUSH | + PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | + PIPE_CONTROL_DEPTH_CACHE_FLUSH | + /* Wa_1409600907:tgl */ + PIPE_CONTROL_DEPTH_STALL | + PIPE_CONTROL_DC_FLUSH_ENABLE | + PIPE_CONTROL_FLUSH_ENABLE | + PIPE_CONTROL_HDC_PIPELINE_FLUSH); + + return gen12_emit_fini_breadcrumb_footer(request, cs); +} + static void execlists_park(struct intel_engine_cs *engine) { - del_timer(&engine->execlists.timer); + cancel_timer(&engine->execlists.timer); + cancel_timer(&engine->execlists.preempt); } void intel_execlists_set_default_submission(struct intel_engine_cs *engine) @@ -2998,6 +3692,9 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine) if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) engine->flags |= I915_ENGINE_HAS_PREEMPTION; } + + if (INTEL_GEN(engine->i915) >= 12) + engine->flags |= I915_ENGINE_HAS_RELATIVE_MMIO; } static void execlists_destroy(struct intel_engine_cs *engine) @@ -3025,6 +3722,8 @@ logical_ring_default_vfuncs(struct intel_engine_cs *engine) engine->emit_flush = gen8_emit_flush; engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb; + if (INTEL_GEN(engine->i915) >= 12) + engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb; engine->set_default_submission = intel_execlists_set_default_submission; @@ -3070,6 +3769,9 @@ static void rcs_submission_override(struct intel_engine_cs *engine) { switch (INTEL_GEN(engine->i915)) { case 12: + engine->emit_flush = gen12_emit_flush_render; + engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; + break; case 11: engine->emit_flush = gen11_emit_flush_render; engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs; @@ -3085,7 +3787,8 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine) { tasklet_init(&engine->execlists.tasklet, execlists_submission_tasklet, (unsigned long)engine); - timer_setup(&engine->execlists.timer, execlists_submission_timer, 0); + timer_setup(&engine->execlists.timer, execlists_timeslice, 0); + timer_setup(&engine->execlists.preempt, execlists_preempt, 0); logical_ring_default_vfuncs(engine); 
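/*
 * Illustrative sketch only of the guard pattern the pre-parser comment
 * above describes for gen12+ self-modifying batches: bracket the store
 * that rewrites an instruction with the MI_ARB_CHECK pre-parser
 * disable/enable toggles so the stale dword cannot be prefetched.
 * emit_patched_dword() is a hypothetical helper name, not part of this
 * series; the in-kernel users instead emit such writes from a separate
 * context (see reloc_gpu(), as the comment notes).
 */
static u32 *emit_patched_dword(u32 *cs, u64 ggtt_addr, u32 new_instr)
{
	*cs++ = preparser_disable(true);	/* stop prefetch past this point */

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = lower_32_bits(ggtt_addr);
	*cs++ = upper_32_bits(ggtt_addr);
	*cs++ = new_instr;			/* the dword being rewritten */

	*cs++ = preparser_disable(false);	/* resume normal prefetch */

	return cs;
}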
logical_ring_default_irqs(engine); @@ -3142,7 +3845,7 @@ int intel_execlists_submission_init(struct intel_engine_cs *engine) return 0; } -static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) +static u32 intel_lr_indirect_ctx_offset(const struct intel_engine_cs *engine) { u32 indirect_ctx_offset; @@ -3175,86 +3878,50 @@ static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine) return indirect_ctx_offset; } -static void execlists_init_reg_state(u32 *regs, - struct intel_context *ce, - struct intel_engine_cs *engine, - struct intel_ring *ring) -{ - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm); - bool rcs = engine->class == RENDER_CLASS; - u32 base = engine->mmio_base; - /* - * A context is actually a big batch buffer with several - * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The - * values we are setting here are only for the first context restore: - * on a subsequent save, the GPU will recreate this batchbuffer with new - * values (including all the missing MI_LOAD_REGISTER_IMM commands that - * we are not initializing here). - * - * Must keep consistent with virtual_update_register_offsets(). - */ - regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) | - MI_LRI_FORCE_POSTED; - - CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(base), +static void init_common_reg_state(u32 * const regs, + const struct intel_engine_cs *engine, + const struct intel_ring *ring) +{ + regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) | - _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH)); - if (INTEL_GEN(engine->i915) < 11) { - regs[CTX_CONTEXT_CONTROL + 1] |= + _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH); + if (INTEL_GEN(engine->i915) < 11) + regs[CTX_CONTEXT_CONTROL] |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT | CTX_CTRL_RS_CTX_ENABLE); - } - CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0); - CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0); - CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0); - CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base), - RING_CTL_SIZE(ring->size) | RING_VALID); - CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0); - CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0); - CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT); - CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0); - CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0); - CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0); - if (rcs) { - struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx; - - CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0); - CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET, - RING_INDIRECT_CTX_OFFSET(base), 0); - if (wa_ctx->indirect_ctx.size) { - u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); - - regs[CTX_RCS_INDIRECT_CTX + 1] = - (ggtt_offset + wa_ctx->indirect_ctx.offset) | - (wa_ctx->indirect_ctx.size / CACHELINE_BYTES); - - regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] = - intel_lr_indirect_ctx_offset(engine) << 6; - } - CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0); - if (wa_ctx->per_ctx.size) { - u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); + regs[CTX_RING_BUFFER_CONTROL] = RING_CTL_SIZE(ring->size) | RING_VALID; + regs[CTX_BB_STATE] = RING_BB_PPGTT; +} - regs[CTX_BB_PER_CTX_PTR + 1] = - (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; - } +static void init_wa_bb_reg_state(u32 * const regs, + const struct intel_engine_cs *engine, + u32 pos_bb_per_ctx) +{ + const struct i915_ctx_workarounds * 
const wa_ctx = &engine->wa_ctx; + + if (wa_ctx->per_ctx.size) { + const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); + + regs[pos_bb_per_ctx] = + (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01; } - regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED; + if (wa_ctx->indirect_ctx.size) { + const u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma); + + regs[pos_bb_per_ctx + 2] = + (ggtt_offset + wa_ctx->indirect_ctx.offset) | + (wa_ctx->indirect_ctx.size / CACHELINE_BYTES); - CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0); - /* PDP values well be assigned later if needed */ - CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(base, 3), 0); - CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(base, 3), 0); - CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(base, 2), 0); - CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(base, 2), 0); - CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(base, 1), 0); - CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(base, 1), 0); - CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(base, 0), 0); - CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(base, 0), 0); + regs[pos_bb_per_ctx + 4] = + intel_lr_indirect_ctx_offset(engine) << 6; + } +} +static void init_ppgtt_reg_state(u32 *regs, const struct i915_ppgtt *ppgtt) +{ if (i915_vm_is_4lvl(&ppgtt->vm)) { /* 64b PPGTT (48bit canonical) * PDP0_DESCRIPTOR contains the base address to PML4 and @@ -3267,15 +3934,47 @@ static void execlists_init_reg_state(u32 *regs, ASSIGN_CTX_PDP(ppgtt, regs, 1); ASSIGN_CTX_PDP(ppgtt, regs, 0); } +} - if (rcs) { - regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1); - CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, 0); +static struct i915_ppgtt *vm_alias(struct i915_address_space *vm) +{ + if (i915_is_ggtt(vm)) + return i915_vm_to_ggtt(vm)->alias; + else + return i915_vm_to_ppgtt(vm); +} + +static void execlists_init_reg_state(u32 *regs, + const struct intel_context *ce, + const struct intel_engine_cs *engine, + const struct intel_ring *ring, + bool close) +{ + /* + * A context is actually a big batch buffer with several + * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The + * values we are setting here are only for the first context restore: + * on a subsequent save, the GPU will recreate this batchbuffer with new + * values (including all the missing MI_LOAD_REGISTER_IMM commands that + * we are not initializing here). + * + * Must keep consistent with virtual_update_register_offsets(). + */ + u32 *bbe = set_offsets(regs, reg_offsets(engine), engine); + + if (close) { /* Close the batch; used mainly by live_lrc_layout() */ + *bbe = MI_BATCH_BUFFER_END; + if (INTEL_GEN(engine->i915) >= 10) + *bbe |= BIT(0); } - regs[CTX_END] = MI_BATCH_BUFFER_END; - if (INTEL_GEN(engine->i915) >= 10) - regs[CTX_END] |= BIT(0); + init_common_reg_state(regs, engine, ring); + init_ppgtt_reg_state(regs, vm_alias(ce->vm)); + + init_wa_bb_reg_state(regs, engine, + INTEL_GEN(engine->i915) >= 12 ? + GEN12_CTX_BB_PER_CTX_PTR : + CTX_BB_PER_CTX_PTR); } static int @@ -3284,6 +3983,7 @@ populate_lr_context(struct intel_context *ce, struct intel_engine_cs *engine, struct intel_ring *ring) { + bool inhibit = true; void *vaddr; u32 *regs; int ret; @@ -3298,12 +3998,6 @@ populate_lr_context(struct intel_context *ce, set_redzone(vaddr, engine); if (engine->default_state) { - /* - * We only want to copy over the template context state; - * skipping over the headers reserved for GuC communication, - * leaving those as zero. 
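 *
 * With this change the whole default image, PPHWSP included, is copied
 * via memcpy(vaddr, defaults, engine->context_size), and "inhibit"
 * records whether such a template existed: when it did not,
 * CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT is set below so the hardware does
 * not try to restore an uninitialised context image on its first use.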
- */ - const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE; void *defaults; defaults = i915_gem_object_pin_map(engine->default_state, @@ -3313,23 +4007,22 @@ populate_lr_context(struct intel_context *ce, goto err_unpin_ctx; } - memcpy(vaddr + start, defaults + start, engine->context_size); + memcpy(vaddr, defaults, engine->context_size); i915_gem_object_unpin_map(engine->default_state); + inhibit = false; } /* The second page of the context object contains some fields which must * be set up prior to the first execution. */ regs = vaddr + LRC_STATE_PN * PAGE_SIZE; - execlists_init_reg_state(regs, ce, engine, ring); - if (!engine->default_state) - regs[CTX_CONTEXT_CONTROL + 1] |= + execlists_init_reg_state(regs, ce, engine, ring, inhibit); + if (inhibit) + regs[CTX_CONTEXT_CONTROL] |= _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); ret = 0; err_unpin_ctx: - __i915_gem_object_flush_map(ctx_obj, - LRC_HEADER_PAGES * PAGE_SIZE, - engine->context_size); + __i915_gem_object_flush_map(ctx_obj, 0, engine->context_size); i915_gem_object_unpin_map(ctx_obj); return ret; } @@ -3346,11 +4039,6 @@ static int __execlists_context_alloc(struct intel_context *ce, GEM_BUG_ON(ce->state); context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE); - /* - * Before the actual start of the context image, we insert a few pages - * for our own use and for sharing with the GuC. - */ - context_size += LRC_HEADER_PAGES * PAGE_SIZE; if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) context_size += I915_GTT_PAGE_SIZE; /* for redzone */ @@ -3462,8 +4150,9 @@ static void virtual_engine_initial_hint(struct virtual_engine *ve) return; swap(ve->siblings[swp], ve->siblings[0]); - virtual_update_register_offsets(ve->context.lrc_reg_state, - ve->siblings[0]); + if (!intel_engine_has_relative_mmio(ve->siblings[0])) + virtual_update_register_offsets(ve->context.lrc_reg_state, + ve->siblings[0]); } static int virtual_context_pin(struct intel_context *ce) @@ -3714,6 +4403,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, ve->base.i915 = ctx->i915; ve->base.gt = siblings[0]->gt; + ve->base.uncore = siblings[0]->uncore; ve->base.id = -1; ve->base.class = OTHER_CLASS; ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; @@ -3737,6 +4427,7 @@ intel_execlists_create_virtual(struct i915_gem_context *ctx, snprintf(ve->base.name, sizeof(ve->base.name), "virtual"); intel_engine_init_active(&ve->base, ENGINE_VIRTUAL); + intel_engine_init_breadcrumbs(&ve->base); intel_engine_init_execlists(&ve->base); @@ -3899,6 +4590,18 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, return 0; } +struct intel_engine_cs * +intel_virtual_engine_get_sibling(struct intel_engine_cs *engine, + unsigned int sibling) +{ + struct virtual_engine *ve = to_virtual_engine(engine); + + if (sibling >= ve->num_siblings) + return NULL; + + return ve->siblings[sibling]; +} + void intel_execlists_show_requests(struct intel_engine_cs *engine, struct drm_printer *m, void (*show_request)(struct drm_printer *m, @@ -3987,6 +4690,8 @@ void intel_lr_context_reset(struct intel_engine_cs *engine, u32 head, bool scrub) { + GEM_BUG_ON(!intel_context_is_pinned(ce)); + /* * We want a simple context + ring to execute the breadcrumb update. * We cannot rely on the context being intact across the GPU hang, @@ -3995,16 +4700,8 @@ void intel_lr_context_reset(struct intel_engine_cs *engine, * future request will be after userspace has had the opportunity * to recreate its own state. 
*/ - if (scrub) { - u32 *regs = ce->lrc_reg_state; - - if (engine->pinned_default_state) { - memcpy(regs, /* skip restoring the vanilla PPHWSP */ - engine->pinned_default_state + LRC_STATE_PN * PAGE_SIZE, - engine->context_size - PAGE_SIZE); - } - execlists_init_reg_state(regs, ce, engine, ce->ring); - } + if (scrub) + restore_default_state(ce, engine); /* Rerun the request; its payload has been neutered (if guilty). */ ce->ring->head = head; @@ -4013,6 +4710,13 @@ void intel_lr_context_reset(struct intel_engine_cs *engine, __execlists_update_reg_state(ce, engine); } +bool +intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine) +{ + return engine->set_default_submission == + intel_execlists_set_default_submission; +} + #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftest_lrc.c" #endif diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.h b/drivers/gpu/drm/i915/gt/intel_lrc.h index c2bba82bcc16..04511d8ebdc1 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc.h @@ -43,6 +43,7 @@ struct intel_engine_cs; #define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT (1 << 0) #define CTX_CTRL_RS_CTX_ENABLE (1 << 1) #define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT (1 << 2) +#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE (1 << 8) #define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0) #define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510) #define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550) @@ -66,6 +67,12 @@ struct intel_engine_cs; #define GEN11_CSB_READ_PTR_MASK (GEN11_CSB_PTR_MASK << 8) #define GEN11_CSB_WRITE_PTR_MASK (GEN11_CSB_PTR_MASK << 0) +#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ +#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */ +#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */ +/* in Gen12 ID 0x7FF is reserved to indicate idle */ +#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1) + enum { INTEL_CONTEXT_SCHEDULE_IN = 0, INTEL_CONTEXT_SCHEDULE_OUT, @@ -79,30 +86,15 @@ int intel_execlists_submission_setup(struct intel_engine_cs *engine); int intel_execlists_submission_init(struct intel_engine_cs *engine); /* Logical Ring Contexts */ - -/* - * We allocate a header at the start of the context image for our own - * use, therefore the actual location of the logical state is offset - * from the start of the VMA. The layout is - * - * | [guc] | [hwsp] [logical state] | - * |<- our header ->|<- context image ->| - * - */ -/* The first page is used for sharing data with the GuC */ -#define LRC_GUCSHR_PN (0) -#define LRC_GUCSHR_SZ (1) /* At the start of the context image is its per-process HWS page */ -#define LRC_PPHWSP_PN (LRC_GUCSHR_PN + LRC_GUCSHR_SZ) +#define LRC_PPHWSP_PN (0) #define LRC_PPHWSP_SZ (1) -/* Finally we have the logical state for the context */ +/* After the PPHWSP we have the logical state for the context */ #define LRC_STATE_PN (LRC_PPHWSP_PN + LRC_PPHWSP_SZ) -/* - * Currently we include the PPHWSP in __intel_engine_context_size() so - * the size of the header is synonymous with the start of the PPHWSP. 
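 *
 * As a worked example of the new layout defined below: the image now
 * begins directly with the per-process HWSP (LRC_PPHWSP_PN = 0) and the
 * logical state follows on the next page (LRC_STATE_PN = 1). The scratch
 * slot used by the flush emitters is dword 0x34 of the PPHWSP, so
 * LRC_PPHWSP_SCRATCH_ADDR = 0x34 * sizeof(u32) = 0xd0, an offset that the
 * STORE_DATA_INDEX-style post-sync writes interpret relative to the HWSP
 * rather than as a GGTT address. Likewise the CTX_* indices in
 * intel_lrc_reg.h now name the value dword of each register directly
 * (hence the "+ 1"), the register offsets themselves coming from the
 * per-engine table consumed by set_offsets().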
- */ -#define LRC_HEADER_PAGES LRC_PPHWSP_PN +/* Space within PPHWSP reserved to be used as scratch */ +#define LRC_PPHWSP_SCRATCH 0x34 +#define LRC_PPHWSP_SCRATCH_ADDR (LRC_PPHWSP_SCRATCH * sizeof(u32)) void intel_execlists_set_default_submission(struct intel_engine_cs *engine); @@ -131,4 +123,11 @@ int intel_virtual_engine_attach_bond(struct intel_engine_cs *engine, const struct intel_engine_cs *master, const struct intel_engine_cs *sibling); +struct intel_engine_cs * +intel_virtual_engine_get_sibling(struct intel_engine_cs *engine, + unsigned int sibling); + +bool +intel_engine_in_execlists_submission_mode(const struct intel_engine_cs *engine); + #endif /* _INTEL_LRC_H_ */ diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h index b8f20ad71169..06ab0276e10e 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h @@ -9,55 +9,41 @@ #include <linux/types.h> -/* GEN8+ Reg State Context */ -#define CTX_LRI_HEADER_0 0x01 -#define CTX_CONTEXT_CONTROL 0x02 -#define CTX_RING_HEAD 0x04 -#define CTX_RING_TAIL 0x06 -#define CTX_RING_BUFFER_START 0x08 -#define CTX_RING_BUFFER_CONTROL 0x0a -#define CTX_BB_HEAD_U 0x0c -#define CTX_BB_HEAD_L 0x0e -#define CTX_BB_STATE 0x10 -#define CTX_SECOND_BB_HEAD_U 0x12 -#define CTX_SECOND_BB_HEAD_L 0x14 -#define CTX_SECOND_BB_STATE 0x16 -#define CTX_BB_PER_CTX_PTR 0x18 -#define CTX_RCS_INDIRECT_CTX 0x1a -#define CTX_RCS_INDIRECT_CTX_OFFSET 0x1c -#define CTX_LRI_HEADER_1 0x21 -#define CTX_CTX_TIMESTAMP 0x22 -#define CTX_PDP3_UDW 0x24 -#define CTX_PDP3_LDW 0x26 -#define CTX_PDP2_UDW 0x28 -#define CTX_PDP2_LDW 0x2a -#define CTX_PDP1_UDW 0x2c -#define CTX_PDP1_LDW 0x2e -#define CTX_PDP0_UDW 0x30 -#define CTX_PDP0_LDW 0x32 -#define CTX_LRI_HEADER_2 0x41 -#define CTX_R_PWR_CLK_STATE 0x42 -#define CTX_END 0x44 - -#define CTX_REG(reg_state, pos, reg, val) do { \ - u32 *reg_state__ = (reg_state); \ - const u32 pos__ = (pos); \ - (reg_state__)[(pos__) + 0] = i915_mmio_reg_offset(reg); \ - (reg_state__)[(pos__) + 1] = (val); \ -} while (0) +/* GEN8 to GEN11 Reg State Context */ +#define CTX_CONTEXT_CONTROL (0x02 + 1) +#define CTX_RING_HEAD (0x04 + 1) +#define CTX_RING_TAIL (0x06 + 1) +#define CTX_RING_BUFFER_START (0x08 + 1) +#define CTX_RING_BUFFER_CONTROL (0x0a + 1) +#define CTX_BB_STATE (0x10 + 1) +#define CTX_BB_PER_CTX_PTR (0x18 + 1) +#define CTX_PDP3_UDW (0x24 + 1) +#define CTX_PDP3_LDW (0x26 + 1) +#define CTX_PDP2_UDW (0x28 + 1) +#define CTX_PDP2_LDW (0x2a + 1) +#define CTX_PDP1_UDW (0x2c + 1) +#define CTX_PDP1_LDW (0x2e + 1) +#define CTX_PDP0_UDW (0x30 + 1) +#define CTX_PDP0_LDW (0x32 + 1) +#define CTX_R_PWR_CLK_STATE (0x42 + 1) + +#define GEN9_CTX_RING_MI_MODE 0x54 + +/* GEN12+ Reg State Context */ +#define GEN12_CTX_BB_PER_CTX_PTR (0x12 + 1) #define ASSIGN_CTX_PDP(ppgtt, reg_state, n) do { \ u32 *reg_state__ = (reg_state); \ const u64 addr__ = i915_page_dir_dma_addr((ppgtt), (n)); \ - (reg_state__)[CTX_PDP ## n ## _UDW + 1] = upper_32_bits(addr__); \ - (reg_state__)[CTX_PDP ## n ## _LDW + 1] = lower_32_bits(addr__); \ + (reg_state__)[CTX_PDP ## n ## _UDW] = upper_32_bits(addr__); \ + (reg_state__)[CTX_PDP ## n ## _LDW] = lower_32_bits(addr__); \ } while (0) #define ASSIGN_CTX_PML4(ppgtt, reg_state) do { \ u32 *reg_state__ = (reg_state); \ const u64 addr__ = px_dma(ppgtt->pd); \ - (reg_state__)[CTX_PDP0_UDW + 1] = upper_32_bits(addr__); \ - (reg_state__)[CTX_PDP0_LDW + 1] = lower_32_bits(addr__); \ + (reg_state__)[CTX_PDP0_UDW] = upper_32_bits(addr__); \ + 
(reg_state__)[CTX_PDP0_LDW] = lower_32_bits(addr__); \ } while (0) #define GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT 0x17 diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c index cea184a7dde9..2b977991b785 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.c +++ b/drivers/gpu/drm/i915/gt/intel_mocs.c @@ -26,6 +26,7 @@ #include "intel_gt.h" #include "intel_mocs.h" #include "intel_lrc.h" +#include "intel_ring.h" /* structures required */ struct drm_i915_mocs_entry { @@ -279,10 +280,9 @@ static const struct drm_i915_mocs_entry icelake_mocs_table[] = { GEN11_MOCS_ENTRIES }; -static bool get_mocs_settings(struct intel_gt *gt, +static bool get_mocs_settings(const struct drm_i915_private *i915, struct drm_i915_mocs_table *table) { - struct drm_i915_private *i915 = gt->i915; bool result = false; if (INTEL_GEN(i915) >= 12) { @@ -323,9 +323,9 @@ static bool get_mocs_settings(struct intel_gt *gt, return result; } -static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index) +static i915_reg_t mocs_register(const struct intel_engine_cs *engine, int index) { - switch (engine_id) { + switch (engine->id) { case RCS0: return GEN9_GFX_MOCS(index); case VCS0: @@ -339,7 +339,7 @@ static i915_reg_t mocs_register(enum intel_engine_id engine_id, int index) case VCS2: return GEN11_MFX2_MOCS(index); default: - MISSING_CASE(engine_id); + MISSING_CASE(engine->id); return INVALID_MMIO_REG; } } @@ -357,118 +357,25 @@ static u32 get_entry_control(const struct drm_i915_mocs_table *table, return table->table[I915_MOCS_PTE].control_value; } -/** - * intel_mocs_init_engine() - emit the mocs control table - * @engine: The engine for whom to emit the registers. - * - * This function simply emits a MI_LOAD_REGISTER_IMM command for the - * given table starting at the given address. - */ -void intel_mocs_init_engine(struct intel_engine_cs *engine) +static void init_mocs_table(struct intel_engine_cs *engine, + const struct drm_i915_mocs_table *table) { - struct intel_gt *gt = engine->gt; - struct intel_uncore *uncore = gt->uncore; - struct drm_i915_mocs_table table; - unsigned int index; - u32 unused_value; - - /* Platforms with global MOCS do not need per-engine initialization. 
*/ - if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915)) - return; - - /* Called under a blanket forcewake */ - assert_forcewakes_active(uncore, FORCEWAKE_ALL); - - if (!get_mocs_settings(gt, &table)) - return; - - /* Set unused values to PTE */ - unused_value = table.table[I915_MOCS_PTE].control_value; - - for (index = 0; index < table.size; index++) { - u32 value = get_entry_control(&table, index); + struct intel_uncore *uncore = engine->uncore; + u32 unused_value = table->table[I915_MOCS_PTE].control_value; + unsigned int i; + for (i = 0; i < table->size; i++) intel_uncore_write_fw(uncore, - mocs_register(engine->id, index), - value); - } + mocs_register(engine, i), + get_entry_control(table, i)); - /* All remaining entries are also unused */ - for (; index < table.n_entries; index++) + /* All remaining entries are unused */ + for (; i < table->n_entries; i++) intel_uncore_write_fw(uncore, - mocs_register(engine->id, index), + mocs_register(engine, i), unused_value); } -static void intel_mocs_init_global(struct intel_gt *gt) -{ - struct intel_uncore *uncore = gt->uncore; - struct drm_i915_mocs_table table; - unsigned int index; - - GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915)); - - if (!get_mocs_settings(gt, &table)) - return; - - if (GEM_DEBUG_WARN_ON(table.size > table.n_entries)) - return; - - for (index = 0; index < table.size; index++) - intel_uncore_write(uncore, - GEN12_GLOBAL_MOCS(index), - table.table[index].control_value); - - /* - * Ok, now set the unused entries to the invalid entry (index 0). These - * entries are officially undefined and no contract for the contents and - * settings is given for these entries. - */ - for (; index < table.n_entries; index++) - intel_uncore_write(uncore, - GEN12_GLOBAL_MOCS(index), - table.table[0].control_value); -} - -static int emit_mocs_control_table(struct i915_request *rq, - const struct drm_i915_mocs_table *table) -{ - enum intel_engine_id engine = rq->engine->id; - unsigned int index; - u32 unused_value; - u32 *cs; - - if (GEM_WARN_ON(table->size > table->n_entries)) - return -ENODEV; - - /* Set unused values to PTE */ - unused_value = table->table[I915_MOCS_PTE].control_value; - - cs = intel_ring_begin(rq, 2 + 2 * table->n_entries); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries); - - for (index = 0; index < table->size; index++) { - u32 value = get_entry_control(table, index); - - *cs++ = i915_mmio_reg_offset(mocs_register(engine, index)); - *cs++ = value; - } - - /* All remaining entries are also unused */ - for (; index < table->n_entries; index++) { - *cs++ = i915_mmio_reg_offset(mocs_register(engine, index)); - *cs++ = unused_value; - } - - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); - - return 0; -} - /* * Get l3cc_value from MOCS entry taking into account when it's not used: * I915_MOCS_PTE's value is returned in this case. 
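 *
 * As a worked example of the packing done by l3cc_combine() in the next
 * hunk, take hypothetical entry values low = 0x0030 (index 2*i) and
 * high = 0x0010 (index 2*i + 1):
 *
 *	low | (u32)high << 16 = 0x0030 | 0x00100000 = 0x00100030
 *
 * i.e. each 32-bit GEN9_LNCFCMOCS register carries two 16-bit L3CC
 * entries, the even-indexed one in the low half-word and the odd-indexed
 * one in the high half-word; the u32 cast added by this patch keeps the
 * promoted-to-int shift of a u16 from overflowing into the sign bit.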
@@ -486,141 +393,99 @@ static inline u32 l3cc_combine(const struct drm_i915_mocs_table *table, u16 low, u16 high) { - return low | high << 16; + return low | (u32)high << 16; } -static int emit_mocs_l3cc_table(struct i915_request *rq, - const struct drm_i915_mocs_table *table) +static void init_l3cc_table(struct intel_engine_cs *engine, + const struct drm_i915_mocs_table *table) { - u16 unused_value; + struct intel_uncore *uncore = engine->uncore; + u16 unused_value = table->table[I915_MOCS_PTE].l3cc_value; unsigned int i; - u32 *cs; - - if (GEM_WARN_ON(table->size > table->n_entries)) - return -ENODEV; - - /* Set unused values to PTE */ - unused_value = table->table[I915_MOCS_PTE].l3cc_value; - - cs = intel_ring_begin(rq, 2 + table->n_entries); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - *cs++ = MI_LOAD_REGISTER_IMM(table->n_entries / 2); for (i = 0; i < table->size / 2; i++) { u16 low = get_entry_l3cc(table, 2 * i); u16 high = get_entry_l3cc(table, 2 * i + 1); - *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i)); - *cs++ = l3cc_combine(table, low, high); + intel_uncore_write(uncore, + GEN9_LNCFCMOCS(i), + l3cc_combine(table, low, high)); } /* Odd table size - 1 left over */ - if (table->size & 0x01) { + if (table->size & 1) { u16 low = get_entry_l3cc(table, 2 * i); - *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i)); - *cs++ = l3cc_combine(table, low, unused_value); + intel_uncore_write(uncore, + GEN9_LNCFCMOCS(i), + l3cc_combine(table, low, unused_value)); i++; } /* All remaining entries are also unused */ - for (; i < table->n_entries / 2; i++) { - *cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(i)); - *cs++ = l3cc_combine(table, unused_value, unused_value); - } + for (; i < table->n_entries / 2; i++) + intel_uncore_write(uncore, + GEN9_LNCFCMOCS(i), + l3cc_combine(table, unused_value, + unused_value)); +} + +void intel_mocs_init_engine(struct intel_engine_cs *engine) +{ + struct drm_i915_mocs_table table; + + /* Called under a blanket forcewake */ + assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); - *cs++ = MI_NOOP; - intel_ring_advance(rq, cs); + if (!get_mocs_settings(engine->i915, &table)) + return; + + /* Platforms with global MOCS do not need per-engine initialization. 
*/ + if (!HAS_GLOBAL_MOCS_REGISTERS(engine->i915)) + init_mocs_table(engine, &table); - return 0; + if (engine->class == RENDER_CLASS) + init_l3cc_table(engine, &table); } -static void intel_mocs_init_l3cc_table(struct intel_gt *gt) +static void intel_mocs_init_global(struct intel_gt *gt) { struct intel_uncore *uncore = gt->uncore; struct drm_i915_mocs_table table; - unsigned int i; - u16 unused_value; + unsigned int index; - if (!get_mocs_settings(gt, &table)) + /* + * LLC and eDRAM control values are not applicable to dgfx + */ + if (IS_DGFX(gt->i915)) return; - /* Set unused values to PTE */ - unused_value = table.table[I915_MOCS_PTE].l3cc_value; - - for (i = 0; i < table.size / 2; i++) { - u16 low = get_entry_l3cc(&table, 2 * i); - u16 high = get_entry_l3cc(&table, 2 * i + 1); + GEM_BUG_ON(!HAS_GLOBAL_MOCS_REGISTERS(gt->i915)); - intel_uncore_write(uncore, - GEN9_LNCFCMOCS(i), - l3cc_combine(&table, low, high)); - } + if (!get_mocs_settings(gt->i915, &table)) + return; - /* Odd table size - 1 left over */ - if (table.size & 0x01) { - u16 low = get_entry_l3cc(&table, 2 * i); + if (GEM_DEBUG_WARN_ON(table.size > table.n_entries)) + return; + for (index = 0; index < table.size; index++) intel_uncore_write(uncore, - GEN9_LNCFCMOCS(i), - l3cc_combine(&table, low, unused_value)); - i++; - } + GEN12_GLOBAL_MOCS(index), + table.table[index].control_value); - /* All remaining entries are also unused */ - for (; i < table.n_entries / 2; i++) + /* + * Ok, now set the unused entries to the invalid entry (index 0). These + * entries are officially undefined and no contract for the contents and + * settings is given for these entries. + */ + for (; index < table.n_entries; index++) intel_uncore_write(uncore, - GEN9_LNCFCMOCS(i), - l3cc_combine(&table, unused_value, - unused_value)); -} - -/** - * intel_mocs_emit() - program the MOCS register. - * @rq: Request to use to set up the MOCS tables. - * - * This function will emit a batch buffer with the values required for - * programming the MOCS register values for all the currently supported - * rings. - * - * These registers are partially stored in the RCS context, so they are - * emitted at the same time so that when a context is created these registers - * are set up. These registers have to be emitted into the start of the - * context as setting the ELSP will re-init some of these registers back - * to the hw values. - * - * Return: 0 on success, otherwise the error status. - */ -int intel_mocs_emit(struct i915_request *rq) -{ - struct drm_i915_mocs_table t; - int ret; - - if (HAS_GLOBAL_MOCS_REGISTERS(rq->i915) || - rq->engine->class != RENDER_CLASS) - return 0; - - if (get_mocs_settings(rq->engine->gt, &t)) { - /* Program the RCS control registers */ - ret = emit_mocs_control_table(rq, &t); - if (ret) - return ret; - - /* Now program the l3cc registers */ - ret = emit_mocs_l3cc_table(rq, &t); - if (ret) - return ret; - } - - return 0; + GEN12_GLOBAL_MOCS(index), + table.table[0].control_value); } void intel_mocs_init(struct intel_gt *gt) { - intel_mocs_init_l3cc_table(gt); - if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915)) intel_mocs_init_global(gt); } diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.h b/drivers/gpu/drm/i915/gt/intel_mocs.h index 2ae816b7ca19..83371f3e6ba1 100644 --- a/drivers/gpu/drm/i915/gt/intel_mocs.h +++ b/drivers/gpu/drm/i915/gt/intel_mocs.h @@ -49,13 +49,10 @@ * context handling keep the MOCS in step. 
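 *
 * With this change the tables are written straight over MMIO when the
 * engines (and, on platforms with global MOCS registers, the GT) are
 * initialised, via init_mocs_table(), init_l3cc_table() and
 * intel_mocs_init_global(), so the old per-request intel_mocs_emit() LRI
 * path is no longer needed.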
*/ -struct i915_request; struct intel_engine_cs; struct intel_gt; void intel_mocs_init(struct intel_gt *gt); void intel_mocs_init_engine(struct intel_engine_cs *engine); -int intel_mocs_emit(struct i915_request *rq); - #endif diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c new file mode 100644 index 000000000000..700104b90163 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -0,0 +1,787 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include <linux/pm_runtime.h> + +#include "i915_drv.h" +#include "intel_gt.h" +#include "intel_gt_pm.h" +#include "intel_rc6.h" +#include "intel_sideband.h" + +/** + * DOC: RC6 + * + * RC6 is a special power stage which allows the GPU to enter an very + * low-voltage mode when idle, using down to 0V while at this stage. This + * stage is entered automatically when the GPU is idle when RC6 support is + * enabled, and as soon as new workload arises GPU wakes up automatically as + * well. + * + * There are different RC6 modes available in Intel GPU, which differentiate + * among each other with the latency required to enter and leave RC6 and + * voltage consumed by the GPU in different states. + * + * The combination of the following flags define which states GPU is allowed + * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and + * RC6pp is deepest RC6. Their support by hardware varies according to the + * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one + * which brings the most power savings; deeper states save more power, but + * require higher latency to switch to and wake up. + */ + +static struct intel_gt *rc6_to_gt(struct intel_rc6 *rc6) +{ + return container_of(rc6, struct intel_gt, rc6); +} + +static struct intel_uncore *rc6_to_uncore(struct intel_rc6 *rc) +{ + return rc6_to_gt(rc)->uncore; +} + +static struct drm_i915_private *rc6_to_i915(struct intel_rc6 *rc) +{ + return rc6_to_gt(rc)->i915; +} + +static inline void set(struct intel_uncore *uncore, i915_reg_t reg, u32 val) +{ + intel_uncore_write_fw(uncore, reg, val); +} + +static void gen11_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* 2b: Program RC6 thresholds.*/ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); + set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150); + + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + + set(uncore, GUC_MAX_IDLE_COUNT, 0xA); + + set(uncore, GEN6_RC_SLEEP, 0); + + set(uncore, GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ + + /* + * 2c: Program Coarse Power Gating Policies. + * + * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we + * use instead is a more conservative estimate for the maximum time + * it takes us to service a CS interrupt and submit a new ELSP - that + * is the time which the GPU is idle waiting for the CPU to select the + * next request to execute. If the idle hysteresis is less than that + * interrupt service latency, the hardware will automatically gate + * the power well and we will then incur the wake up cost on top of + * the service latency. A similar guide from plane_state is that we + * do not want the enable hysteresis to less than the wakeup latency. 
+ * + * igt/gem_exec_nop/sequential provides a rough estimate for the + * service latency, and puts it around 10us for Broadwell (and other + * big core) and around 40us for Broxton (and other low power cores). + * [Note that for legacy ringbuffer submission, this is less than 1us!] + * However, the wakeup latency on Broxton is closer to 100us. To be + * conservative, we have to factor in a context switch on top (due + * to ksoftirqd). + */ + set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250); + set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250); + + /* 3a: Enable RC6 */ + set(uncore, GEN6_RC_CONTROL, + GEN6_RC_CTL_HW_ENABLE | + GEN6_RC_CTL_RC6_ENABLE | + GEN6_RC_CTL_EI_MODE(1)); + + set(uncore, GEN9_PG_ENABLE, + GEN9_RENDER_PG_ENABLE | + GEN9_MEDIA_PG_ENABLE | + GEN11_MEDIA_SAMPLER_PG_ENABLE); +} + +static void gen9_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 rc6_mode; + + /* 2b: Program RC6 thresholds.*/ + if (INTEL_GEN(rc6_to_i915(rc6)) >= 10) { + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); + set(uncore, GEN10_MEDIA_WAKE_RATE_LIMIT, 150); + } else if (IS_SKYLAKE(rc6_to_i915(rc6))) { + /* + * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only + * when CPG is enabled + */ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); + } else { + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); + } + + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + + set(uncore, GUC_MAX_IDLE_COUNT, 0xA); + + set(uncore, GEN6_RC_SLEEP, 0); + + /* + * 2c: Program Coarse Power Gating Policies. + * + * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we + * use instead is a more conservative estimate for the maximum time + * it takes us to service a CS interrupt and submit a new ELSP - that + * is the time which the GPU is idle waiting for the CPU to select the + * next request to execute. If the idle hysteresis is less than that + * interrupt service latency, the hardware will automatically gate + * the power well and we will then incur the wake up cost on top of + * the service latency. A similar guide from plane_state is that we + * do not want the enable hysteresis to less than the wakeup latency. + * + * igt/gem_exec_nop/sequential provides a rough estimate for the + * service latency, and puts it around 10us for Broadwell (and other + * big core) and around 40us for Broxton (and other low power cores). + * [Note that for legacy ringbuffer submission, this is less than 1us!] + * However, the wakeup latency on Broxton is closer to 100us. To be + * conservative, we have to factor in a context switch on top (due + * to ksoftirqd). + */ + set(uncore, GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250); + set(uncore, GEN9_RENDER_PG_IDLE_HYSTERESIS, 250); + + /* 3a: Enable RC6 */ + set(uncore, GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ + + /* WaRsUseTimeoutMode:cnl (pre-prod) */ + if (IS_CNL_REVID(rc6_to_i915(rc6), CNL_REVID_A0, CNL_REVID_C0)) + rc6_mode = GEN7_RC_CTL_TO_MODE; + else + rc6_mode = GEN6_RC_CTL_EI_MODE(1); + + set(uncore, GEN6_RC_CONTROL, + GEN6_RC_CTL_HW_ENABLE | + GEN6_RC_CTL_RC6_ENABLE | + rc6_mode); + + /* + * WaRsDisableCoarsePowerGating:skl,cnl + * - Render/Media PG need to be disabled with RC6. 
+ */ + if (!NEEDS_WaRsDisableCoarsePowerGating(rc6_to_i915(rc6))) + set(uncore, GEN9_PG_ENABLE, + GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE); +} + +static void gen8_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* 2b: Program RC6 thresholds.*/ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + set(uncore, GEN6_RC_SLEEP, 0); + set(uncore, GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ + + /* 3: Enable RC6 */ + set(uncore, GEN6_RC_CONTROL, + GEN6_RC_CTL_HW_ENABLE | + GEN7_RC_CTL_TO_MODE | + GEN6_RC_CTL_RC6_ENABLE); +} + +static void gen6_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 rc6vids, rc6_mask; + int ret; + + set(uncore, GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); + set(uncore, GEN6_RC6pp_WAKE_RATE_LIMIT, 30); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); + + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + + set(uncore, GEN6_RC_SLEEP, 0); + set(uncore, GEN6_RC1e_THRESHOLD, 1000); + if (IS_IVYBRIDGE(i915)) + set(uncore, GEN6_RC6_THRESHOLD, 125000); + else + set(uncore, GEN6_RC6_THRESHOLD, 50000); + set(uncore, GEN6_RC6p_THRESHOLD, 150000); + set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */ + + /* We don't use those on Haswell */ + rc6_mask = GEN6_RC_CTL_RC6_ENABLE; + if (HAS_RC6p(i915)) + rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; + if (HAS_RC6pp(i915)) + rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; + set(uncore, GEN6_RC_CONTROL, + rc6_mask | + GEN6_RC_CTL_EI_MODE(1) | + GEN6_RC_CTL_HW_ENABLE); + + rc6vids = 0; + ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, + &rc6vids, NULL); + if (IS_GEN(i915, 6) && ret) { + DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); + } else if (IS_GEN(i915, 6) && + (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { + DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", + GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); + rc6vids &= 0xffff00; + rc6vids |= GEN6_ENCODE_RC6_VID(450); + ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); + if (ret) + DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); + } +} + +/* Check that the pcbr address is not empty. 
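 * If the BIOS left it zero, the code below points PCBR at a 32 KiB
 * power-context carve-out taken from the very top of stolen memory
 * (dsm.end + 1 - pctx_size) and writes back only the 4 KiB-aligned
 * physical address (paddr & ~4095), matching the
 * (pcbr >> VLV_PCBR_ADDR_SHIFT) == 0 emptiness test.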
*/ +static int chv_rc6_init(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + resource_size_t pctx_paddr, paddr; + resource_size_t pctx_size = 32 * SZ_1K; + u32 pcbr; + + pcbr = intel_uncore_read(uncore, VLV_PCBR); + if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { + DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); + paddr = rc6_to_i915(rc6)->dsm.end + 1 - pctx_size; + GEM_BUG_ON(paddr > U32_MAX); + + pctx_paddr = (paddr & ~4095); + intel_uncore_write(uncore, VLV_PCBR, pctx_paddr); + } + + return 0; +} + +static int vlv_rc6_init(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct drm_i915_gem_object *pctx; + resource_size_t pctx_paddr; + resource_size_t pctx_size = 24 * SZ_1K; + u32 pcbr; + + pcbr = intel_uncore_read(uncore, VLV_PCBR); + if (pcbr) { + /* BIOS set it up already, grab the pre-alloc'd space */ + resource_size_t pcbr_offset; + + pcbr_offset = (pcbr & ~4095) - i915->dsm.start; + pctx = i915_gem_object_create_stolen_for_preallocated(i915, + pcbr_offset, + I915_GTT_OFFSET_NONE, + pctx_size); + if (IS_ERR(pctx)) + return PTR_ERR(pctx); + + goto out; + } + + DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); + + /* + * From the Gunit register HAS: + * The Gfx driver is expected to program this register and ensure + * proper allocation within Gfx stolen memory. For example, this + * register should be programmed such than the PCBR range does not + * overlap with other ranges, such as the frame buffer, protected + * memory, or any other relevant ranges. + */ + pctx = i915_gem_object_create_stolen(i915, pctx_size); + if (IS_ERR(pctx)) { + DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); + return PTR_ERR(pctx); + } + + GEM_BUG_ON(range_overflows_t(u64, + i915->dsm.start, + pctx->stolen->start, + U32_MAX)); + pctx_paddr = i915->dsm.start + pctx->stolen->start; + intel_uncore_write(uncore, VLV_PCBR, pctx_paddr); + +out: + rc6->pctx = pctx; + return 0; +} + +static void chv_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + /* 2a: Program RC6 thresholds.*/ + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ + + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + set(uncore, GEN6_RC_SLEEP, 0); + + /* TO threshold set to 500 us (0x186 * 1.28 us) */ + set(uncore, GEN6_RC6_THRESHOLD, 0x186); + + /* Allows RC6 residency counter to work */ + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | + VLV_MEDIA_RC6_COUNT_EN | + VLV_RENDER_RC6_COUNT_EN)); + + /* 3: Enable RC6 */ + set(uncore, GEN6_RC_CONTROL, GEN7_RC_CTL_TO_MODE); +} + +static void vlv_rc6_enable(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct intel_engine_cs *engine; + enum intel_engine_id id; + + set(uncore, GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); + set(uncore, GEN6_RC_EVALUATION_INTERVAL, 125000); + set(uncore, GEN6_RC_IDLE_HYSTERSIS, 25); + + for_each_engine(engine, rc6_to_gt(rc6), id) + set(uncore, RING_MAX_IDLE(engine->mmio_base), 10); + + set(uncore, GEN6_RC6_THRESHOLD, 0x557); + + /* Allows RC6 residency counter to work */ + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | + VLV_MEDIA_RC0_COUNT_EN | + VLV_RENDER_RC0_COUNT_EN | 
+ VLV_MEDIA_RC6_COUNT_EN | + VLV_RENDER_RC6_COUNT_EN)); + + set(uncore, GEN6_RC_CONTROL, + GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL); +} + +static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6) +{ + struct intel_uncore *uncore = rc6_to_uncore(rc6); + struct drm_i915_private *i915 = rc6_to_i915(rc6); + u32 rc6_ctx_base, rc_ctl, rc_sw_target; + bool enable_rc6 = true; + + rc_ctl = intel_uncore_read(uncore, GEN6_RC_CONTROL); + rc_sw_target = intel_uncore_read(uncore, GEN6_RC_STATE); + rc_sw_target &= RC_SW_TARGET_STATE_MASK; + rc_sw_target >>= RC_SW_TARGET_STATE_SHIFT; + DRM_DEBUG_DRIVER("BIOS enabled RC states: " + "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n", + onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE), + onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), + rc_sw_target); + + if (!(intel_uncore_read(uncore, RC6_LOCATION) & RC6_CTX_IN_DRAM)) { + DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n"); + enable_rc6 = false; + } + + /* + * The exact context size is not known for BXT, so assume a page size + * for this check. + */ + rc6_ctx_base = + intel_uncore_read(uncore, RC6_CTX_BASE) & RC6_CTX_BASE_MASK; + if (!(rc6_ctx_base >= i915->dsm_reserved.start && + rc6_ctx_base + PAGE_SIZE < i915->dsm_reserved.end)) { + DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n"); + enable_rc6 = false; + } + + if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) { + DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n"); + enable_rc6 = false; + } + + if (!intel_uncore_read(uncore, GEN8_PUSHBUS_CONTROL) || + !intel_uncore_read(uncore, GEN8_PUSHBUS_ENABLE) || + !intel_uncore_read(uncore, GEN8_PUSHBUS_SHIFT)) { + DRM_DEBUG_DRIVER("Pushbus not setup properly.\n"); + enable_rc6 = false; + } + + if (!intel_uncore_read(uncore, GEN6_GFXPAUSE)) { + DRM_DEBUG_DRIVER("GFX pause not setup properly.\n"); + enable_rc6 = false; + } + + if (!intel_uncore_read(uncore, GEN8_MISC_CTRL0)) { + DRM_DEBUG_DRIVER("GPM control not setup properly.\n"); + enable_rc6 = false; + } + + return enable_rc6; +} + +static bool rc6_supported(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + + if (!HAS_RC6(i915)) + return false; + + if (intel_vgpu_active(i915)) + return false; + + if (is_mock_gt(rc6_to_gt(rc6))) + return false; + + if (IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(rc6)) { + dev_notice(i915->drm.dev, + "RC6 and powersaving disabled by BIOS\n"); + return false; + } + + return true; +} + +static void rpm_get(struct intel_rc6 *rc6) +{ + GEM_BUG_ON(rc6->wakeref); + pm_runtime_get_sync(&rc6_to_i915(rc6)->drm.pdev->dev); + rc6->wakeref = true; +} + +static void rpm_put(struct intel_rc6 *rc6) +{ + GEM_BUG_ON(!rc6->wakeref); + pm_runtime_put(&rc6_to_i915(rc6)->drm.pdev->dev); + rc6->wakeref = false; +} + +static bool intel_rc6_ctx_corrupted(struct intel_rc6 *rc6) +{ + return !intel_uncore_read(rc6_to_uncore(rc6), GEN8_RC6_CTX_INFO); +} + +static void intel_rc6_ctx_wa_init(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return; + + if (intel_rc6_ctx_corrupted(rc6)) { + DRM_INFO("RC6 context corrupted, disabling runtime power management\n"); + rc6->ctx_corrupted = true; + } +} + +/** + * intel_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA + * @rc6: 
rc6 state + * + * Perform any steps needed to re-init the RC6 CTX WA after system resume. + */ +void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6) +{ + if (rc6->ctx_corrupted && !intel_rc6_ctx_corrupted(rc6)) { + DRM_INFO("RC6 context restored, re-enabling runtime power management\n"); + rc6->ctx_corrupted = false; + } +} + +/** + * intel_rc6_ctx_wa_check - check for a new RC6 CTX corruption + * @rc6: rc6 state + * + * Check if an RC6 CTX corruption has happened since the last check and if so + * disable RC6 and runtime power management. +*/ +void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + + if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) + return; + + if (rc6->ctx_corrupted) + return; + + if (!intel_rc6_ctx_corrupted(rc6)) + return; + + DRM_NOTE("RC6 context corruption, disabling runtime power management\n"); + + intel_rc6_disable(rc6); + rc6->ctx_corrupted = true; + + return; +} + +static void __intel_rc6_disable(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_uncore *uncore = rc6_to_uncore(rc6); + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + if (INTEL_GEN(i915) >= 9) + set(uncore, GEN9_PG_ENABLE, 0); + set(uncore, GEN6_RC_CONTROL, 0); + set(uncore, GEN6_RC_STATE, 0); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); +} + +void intel_rc6_init(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + int err; + + /* Disable runtime-pm until we can save the GPU state with rc6 pctx */ + rpm_get(rc6); + + if (!rc6_supported(rc6)) + return; + + intel_rc6_ctx_wa_init(rc6); + + if (IS_CHERRYVIEW(i915)) + err = chv_rc6_init(rc6); + else if (IS_VALLEYVIEW(i915)) + err = vlv_rc6_init(rc6); + else + err = 0; + + /* Sanitize rc6, ensure it is disabled before we are ready. */ + __intel_rc6_disable(rc6); + + rc6->supported = err == 0; +} + +void intel_rc6_sanitize(struct intel_rc6 *rc6) +{ + if (rc6->enabled) { /* unbalanced suspend/resume */ + rpm_get(rc6); + rc6->enabled = false; + } + + if (rc6->supported) + __intel_rc6_disable(rc6); +} + +void intel_rc6_enable(struct intel_rc6 *rc6) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_uncore *uncore = rc6_to_uncore(rc6); + + if (!rc6->supported) + return; + + GEM_BUG_ON(rc6->enabled); + + if (rc6->ctx_corrupted) + return; + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + + if (IS_CHERRYVIEW(i915)) + chv_rc6_enable(rc6); + else if (IS_VALLEYVIEW(i915)) + vlv_rc6_enable(rc6); + else if (INTEL_GEN(i915) >= 11) + gen11_rc6_enable(rc6); + else if (INTEL_GEN(i915) >= 9) + gen9_rc6_enable(rc6); + else if (IS_BROADWELL(i915)) + gen8_rc6_enable(rc6); + else if (INTEL_GEN(i915) >= 6) + gen6_rc6_enable(rc6); + + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + + /* rc6 is ready, runtime-pm is go! */ + rpm_put(rc6); + rc6->enabled = true; +} + +void intel_rc6_disable(struct intel_rc6 *rc6) +{ + if (!rc6->enabled) + return; + + rpm_get(rc6); + rc6->enabled = false; + + __intel_rc6_disable(rc6); +} + +void intel_rc6_fini(struct intel_rc6 *rc6) +{ + struct drm_i915_gem_object *pctx; + + intel_rc6_disable(rc6); + + pctx = fetch_and_zero(&rc6->pctx); + if (pctx) + i915_gem_object_put(pctx); + + if (rc6->wakeref) + rpm_put(rc6); +} + +static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg) +{ + u32 lower, upper, tmp; + int loop = 2; + + /* + * The register accessed do not need forcewake. We borrow + * uncore lock to prevent concurrent access to range reg. 
+ */ + lockdep_assert_held(&uncore->lock); + + /* + * vlv and chv residency counters are 40 bits in width. + * With a control bit, we can choose between upper or lower + * 32bit window into this counter. + * + * Although we always use the counter in high-range mode elsewhere, + * userspace may attempt to read the value before rc6 is initialised, + * before we have set the default VLV_COUNTER_CONTROL value. So always + * set the high bit to be safe. + */ + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); + upper = intel_uncore_read_fw(uncore, reg); + do { + tmp = upper; + + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH)); + lower = intel_uncore_read_fw(uncore, reg); + + set(uncore, VLV_COUNTER_CONTROL, + _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); + upper = intel_uncore_read_fw(uncore, reg); + } while (upper != tmp && --loop); + + /* + * Everywhere else we always use VLV_COUNTER_CONTROL with the + * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set + * now. + */ + + return lower | (u64)upper << 8; +} + +u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg) +{ + struct drm_i915_private *i915 = rc6_to_i915(rc6); + struct intel_uncore *uncore = rc6_to_uncore(rc6); + u64 time_hw, prev_hw, overflow_hw; + unsigned int fw_domains; + unsigned long flags; + unsigned int i; + u32 mul, div; + + if (!rc6->supported) + return 0; + + /* + * Store previous hw counter values for counter wrap-around handling. + * + * There are only four interesting registers and they live next to each + * other so we can use the relative address, compared to the smallest + * one as the index into driver storage. + */ + i = (i915_mmio_reg_offset(reg) - + i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32); + if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency))) + return 0; + + fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); + + spin_lock_irqsave(&uncore->lock, flags); + intel_uncore_forcewake_get__locked(uncore, fw_domains); + + /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + mul = 1000000; + div = i915->czclk_freq; + overflow_hw = BIT_ULL(40); + time_hw = vlv_residency_raw(uncore, reg); + } else { + /* 833.33ns units on Gen9LP, 1.28us elsewhere. */ + if (IS_GEN9_LP(i915)) { + mul = 10000; + div = 12; + } else { + mul = 1280; + div = 1; + } + + overflow_hw = BIT_ULL(32); + time_hw = intel_uncore_read_fw(uncore, reg); + } + + /* + * Counter wrap handling. + * + * But relying on a sufficient frequency of queries otherwise counters + * can still wrap. + */ + prev_hw = rc6->prev_hw_residency[i]; + rc6->prev_hw_residency[i] = time_hw; + + /* RC6 delta from last sample. */ + if (time_hw >= prev_hw) + time_hw -= prev_hw; + else + time_hw += overflow_hw - prev_hw; + + /* Add delta to RC6 extended raw driver copy. 
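 *
 * Worked example of the wrap handling just above, assuming a 32-bit
 * counter (overflow_hw = 2^32): with prev_hw = 0xffff0000 and a new raw
 * reading of 0x00000100, time_hw becomes
 * 0x100 + 0x100000000 - 0xffff0000 = 0x10100 ticks. The accumulated tick
 * count is finally scaled to nanoseconds by mul_u64_u32_div(): on Gen9 LP
 * mul/div = 10000/12, i.e. 833.33 ns per tick, 1280/1 (1.28 us) on the
 * other big-core platforms, and 1000000/czclk_freq on VLV/CHV.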
*/ + time_hw += rc6->cur_residency[i]; + rc6->cur_residency[i] = time_hw; + + intel_uncore_forcewake_put__locked(uncore, fw_domains); + spin_unlock_irqrestore(&uncore->lock, flags); + + return mul_u64_u32_div(time_hw, mul, div); +} + +u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg) +{ + return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(rc6, reg), 1000); +} diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.h b/drivers/gpu/drm/i915/gt/intel_rc6.h new file mode 100644 index 000000000000..1370f6834a4c --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rc6.h @@ -0,0 +1,28 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RC6_H +#define INTEL_RC6_H + +#include "i915_reg.h" + +struct intel_engine_cs; +struct intel_rc6; + +void intel_rc6_init(struct intel_rc6 *rc6); +void intel_rc6_fini(struct intel_rc6 *rc6); + +void intel_rc6_sanitize(struct intel_rc6 *rc6); +void intel_rc6_enable(struct intel_rc6 *rc6); +void intel_rc6_disable(struct intel_rc6 *rc6); + +u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, i915_reg_t reg); +u64 intel_rc6_residency_us(struct intel_rc6 *rc6, i915_reg_t reg); + +void intel_rc6_ctx_wa_check(struct intel_rc6 *rc6); +void intel_rc6_ctx_wa_resume(struct intel_rc6 *rc6); + +#endif /* INTEL_RC6_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rc6_types.h b/drivers/gpu/drm/i915/gt/intel_rc6_types.h new file mode 100644 index 000000000000..89ad5697a8d4 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rc6_types.h @@ -0,0 +1,29 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RC6_TYPES_H +#define INTEL_RC6_TYPES_H + +#include <linux/spinlock.h> +#include <linux/types.h> + +#include "intel_engine_types.h" + +struct drm_i915_gem_object; + +struct intel_rc6 { + u64 prev_hw_residency[4]; + u64 cur_residency[4]; + + struct drm_i915_gem_object *pctx; + + bool supported : 1; + bool enabled : 1; + bool wakeref : 1; + bool ctx_corrupted : 1; +}; + +#endif /* INTEL_RC6_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_renderstate.c b/drivers/gpu/drm/i915/gt/intel_renderstate.c index 6d05f9c64178..c4edc35e7d89 100644 --- a/drivers/gpu/drm/i915/gt/intel_renderstate.c +++ b/drivers/gpu/drm/i915/gt/intel_renderstate.c @@ -27,6 +27,7 @@ #include "i915_drv.h" #include "intel_renderstate.h" +#include "intel_ring.h" struct intel_renderstate { const struct intel_renderstate_rodata *rodata; diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 8cea42379dd7..f03e000051c1 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -282,14 +282,14 @@ static int gen6_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - struct intel_engine_cs *engine; - const u32 hw_engine_mask[] = { + static const u32 hw_engine_mask[] = { [RCS0] = GEN6_GRDOM_RENDER, [BCS0] = GEN6_GRDOM_BLT, [VCS0] = GEN6_GRDOM_MEDIA, [VCS1] = GEN8_GRDOM_MEDIA2, [VECS0] = GEN6_GRDOM_VECS, }; + struct intel_engine_cs *engine; u32 hw_mask; if (engine_mask == ALL_ENGINES) { @@ -298,7 +298,7 @@ static int gen6_reset_engines(struct intel_gt *gt, intel_engine_mask_t tmp; hw_mask = 0; - for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { + for_each_engine_masked(engine, gt, engine_mask, tmp) { GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); hw_mask |= hw_engine_mask[engine->id]; } @@ -307,7 +307,7 @@ static int gen6_reset_engines(struct intel_gt *gt, return gen6_hw_domain_reset(gt, 
hw_mask); } -static u32 gen11_lock_sfc(struct intel_engine_cs *engine) +static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask) { struct intel_uncore *uncore = engine->uncore; u8 vdbox_sfc_access = RUNTIME_INFO(engine->i915)->vdbox_sfc_access; @@ -316,6 +316,7 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine) i915_reg_t sfc_usage; u32 sfc_usage_bit; u32 sfc_reset_bit; + int ret; switch (engine->class) { case VIDEO_DECODE_CLASS: @@ -350,27 +351,33 @@ static u32 gen11_lock_sfc(struct intel_engine_cs *engine) } /* - * Tell the engine that a software reset is going to happen. The engine - * will then try to force lock the SFC (if currently locked, it will - * remain so until we tell the engine it is safe to unlock; if currently - * unlocked, it will ignore this and all new lock requests). If SFC - * ends up being locked to the engine we want to reset, we have to reset - * it as well (we will unlock it once the reset sequence is completed). + * If the engine is using a SFC, tell the engine that a software reset + * is going to happen. The engine will then try to force lock the SFC. + * If SFC ends up being locked to the engine we want to reset, we have + * to reset it as well (we will unlock it once the reset sequence is + * completed). */ + if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)) + return 0; + rmw_set_fw(uncore, sfc_forced_lock, sfc_forced_lock_bit); - if (__intel_wait_for_register_fw(uncore, - sfc_forced_lock_ack, - sfc_forced_lock_ack_bit, - sfc_forced_lock_ack_bit, - 1000, 0, NULL)) { - DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n"); + ret = __intel_wait_for_register_fw(uncore, + sfc_forced_lock_ack, + sfc_forced_lock_ack_bit, + sfc_forced_lock_ack_bit, + 1000, 0, NULL); + + /* Was the SFC released while we were trying to lock it? */ + if (!(intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit)) return 0; - } - if (intel_uncore_read_fw(uncore, sfc_usage) & sfc_usage_bit) - return sfc_reset_bit; + if (ret) { + DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n"); + return ret; + } + *hw_mask |= sfc_reset_bit; return 0; } @@ -406,7 +413,7 @@ static int gen11_reset_engines(struct intel_gt *gt, intel_engine_mask_t engine_mask, unsigned int retry) { - const u32 hw_engine_mask[] = { + static const u32 hw_engine_mask[] = { [RCS0] = GEN11_GRDOM_RENDER, [BCS0] = GEN11_GRDOM_BLT, [VCS0] = GEN11_GRDOM_MEDIA, @@ -425,17 +432,26 @@ static int gen11_reset_engines(struct intel_gt *gt, hw_mask = GEN11_GRDOM_FULL; } else { hw_mask = 0; - for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { + for_each_engine_masked(engine, gt, engine_mask, tmp) { GEM_BUG_ON(engine->id >= ARRAY_SIZE(hw_engine_mask)); hw_mask |= hw_engine_mask[engine->id]; - hw_mask |= gen11_lock_sfc(engine); + ret = gen11_lock_sfc(engine, &hw_mask); + if (ret) + goto sfc_unlock; } } ret = gen6_hw_domain_reset(gt, hw_mask); +sfc_unlock: + /* + * We unlock the SFC based on the lock status and not the result of + * gen11_lock_sfc to make sure that we clean properly if something + * wrong happened during the lock (e.g. lock acquired after timeout + * expiration). 
+ */ if (engine_mask != ALL_ENGINES) - for_each_engine_masked(engine, gt->i915, engine_mask, tmp) + for_each_engine_masked(engine, gt, engine_mask, tmp) gen11_unlock_sfc(engine); return ret; @@ -494,7 +510,7 @@ static int gen8_reset_engines(struct intel_gt *gt, intel_engine_mask_t tmp; int ret; - for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { + for_each_engine_masked(engine, gt, engine_mask, tmp) { ret = gen8_engine_reset_prepare(engine); if (ret && !reset_non_ready) goto skip_reset; @@ -520,19 +536,30 @@ static int gen8_reset_engines(struct intel_gt *gt, ret = gen6_reset_engines(gt, engine_mask, retry); skip_reset: - for_each_engine_masked(engine, gt->i915, engine_mask, tmp) + for_each_engine_masked(engine, gt, engine_mask, tmp) gen8_engine_reset_cancel(engine); return ret; } +static int mock_reset(struct intel_gt *gt, + intel_engine_mask_t mask, + unsigned int retry) +{ + return 0; +} + typedef int (*reset_func)(struct intel_gt *, intel_engine_mask_t engine_mask, unsigned int retry); -static reset_func intel_get_gpu_reset(struct drm_i915_private *i915) +static reset_func intel_get_gpu_reset(const struct intel_gt *gt) { - if (INTEL_GEN(i915) >= 8) + struct drm_i915_private *i915 = gt->i915; + + if (is_mock_gt(gt)) + return mock_reset; + else if (INTEL_GEN(i915) >= 8) return gen8_reset_engines; else if (INTEL_GEN(i915) >= 6) return gen6_reset_engines; @@ -555,7 +582,7 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) int ret = -ETIMEDOUT; int retry; - reset = intel_get_gpu_reset(gt->i915); + reset = intel_get_gpu_reset(gt); if (!reset) return -ENODEV; @@ -575,17 +602,20 @@ int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask) return ret; } -bool intel_has_gpu_reset(struct drm_i915_private *i915) +bool intel_has_gpu_reset(const struct intel_gt *gt) { if (!i915_modparams.reset) return NULL; - return intel_get_gpu_reset(i915); + return intel_get_gpu_reset(gt); } -bool intel_has_reset_engine(struct drm_i915_private *i915) +bool intel_has_reset_engine(const struct intel_gt *gt) { - return INTEL_INFO(i915)->has_reset_engine && i915_modparams.reset >= 2; + if (i915_modparams.reset < 2) + return false; + + return INTEL_INFO(gt->i915)->has_reset_engine; } int intel_reset_guc(struct intel_gt *gt) @@ -652,7 +682,7 @@ static intel_engine_mask_t reset_prepare(struct intel_gt *gt) intel_engine_mask_t awake = 0; enum intel_engine_id id; - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { if (intel_engine_pm_get_if_awake(engine)) awake |= engine->mask; reset_prepare_engine(engine); @@ -682,10 +712,10 @@ static int gt_reset(struct intel_gt *gt, intel_engine_mask_t stalled_mask) if (err) return err; - for_each_engine(engine, gt->i915, id) + for_each_engine(engine, gt, id) __intel_engine_reset(engine, stalled_mask & engine->mask); - i915_gem_restore_fences(gt->i915); + i915_gem_restore_fences(gt->ggtt); return err; } @@ -695,7 +725,7 @@ static void reset_finish_engine(struct intel_engine_cs *engine) engine->reset.finish(engine); intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL); - intel_engine_signal_breadcrumbs(engine); + intel_engine_breadcrumbs_irq(engine); } static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake) @@ -703,7 +733,7 @@ static void reset_finish(struct intel_gt *gt, intel_engine_mask_t awake) struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { reset_finish_engine(engine); if (awake & engine->mask) 
intel_engine_pm_put(engine); @@ -739,7 +769,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) { struct drm_printer p = drm_debug_printer(__func__); - for_each_engine(engine, gt->i915, id) + for_each_engine(engine, gt, id) intel_engine_dump(engine, &p, "%s\n", engine->name); } @@ -756,7 +786,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) __intel_gt_reset(gt, ALL_ENGINES); - for_each_engine(engine, gt->i915, id) + for_each_engine(engine, gt, id) engine->submit_request = nop_submit_request; /* @@ -768,7 +798,7 @@ static void __intel_gt_set_wedged(struct intel_gt *gt) set_bit(I915_WEDGED, >->reset.flags); /* Mark all executing requests as skipped */ - for_each_engine(engine, gt->i915, id) + for_each_engine(engine, gt, id) engine->cancel_requests(engine); reset_finish(gt, awake); @@ -781,7 +811,7 @@ void intel_gt_set_wedged(struct intel_gt *gt) intel_wakeref_t wakeref; mutex_lock(>->reset.mutex); - with_intel_runtime_pm(>->i915->runtime_pm, wakeref) + with_intel_runtime_pm(gt->uncore->rpm, wakeref) __intel_gt_set_wedged(gt); mutex_unlock(>->reset.mutex); } @@ -791,11 +821,13 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) struct intel_gt_timelines *timelines = >->timelines; struct intel_timeline *tl; unsigned long flags; + bool ok; if (!test_bit(I915_WEDGED, >->reset.flags)) return true; - if (!gt->scratch) /* Never full initialised, recovery impossible */ + /* Never fully initialised, recovery impossible */ + if (test_bit(I915_WEDGED_ON_INIT, >->reset.flags)) return false; GEM_TRACE("start\n"); @@ -812,10 +844,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) */ spin_lock_irqsave(&timelines->lock, flags); list_for_each_entry(tl, &timelines->active_list, link) { - struct i915_request *rq; + struct dma_fence *fence; - rq = i915_active_request_get_unlocked(&tl->last_request); - if (!rq) + fence = i915_active_fence_get(&tl->last_request); + if (!fence) continue; spin_unlock_irqrestore(&timelines->lock, flags); @@ -827,8 +859,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) * (I915_FENCE_TIMEOUT) so this wait should not be unbounded * in the worst case. */ - dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT); - i915_request_put(rq); + dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT); + dma_fence_put(fence); /* Restart iteration after droping lock */ spin_lock_irqsave(&timelines->lock, flags); @@ -836,7 +868,18 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt) } spin_unlock_irqrestore(&timelines->lock, flags); - intel_gt_sanitize(gt, false); + /* We must reset pending GPU events before restoring our submission */ + ok = !HAS_EXECLISTS(gt->i915); /* XXX better agnosticism desired */ + if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display) + ok = __intel_gt_reset(gt, ALL_ENGINES) == 0; + if (!ok) { + /* + * Warn CI about the unrecoverable wedged condition. + * Time for a reboot. + */ + add_taint_for_CI(TAINT_WARN); + return false; + } /* * Undo nop_submit_request. 
We prevent all new i915 requests from @@ -891,7 +934,7 @@ static int resume(struct intel_gt *gt) enum intel_engine_id id; int ret; - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { ret = engine->resume(engine); if (ret) return ret; @@ -941,7 +984,7 @@ void intel_gt_reset(struct intel_gt *gt, awake = reset_prepare(gt); - if (!intel_has_gpu_reset(gt->i915)) { + if (!intel_has_gpu_reset(gt)) { if (i915_modparams.reset) dev_err(gt->i915->drm.dev, "GPU reset not supported\n"); else @@ -970,7 +1013,7 @@ void intel_gt_reset(struct intel_gt *gt, * was running at the time of the reset (i.e. we weren't VT * switched away). */ - ret = i915_gem_init_hw(gt->i915); + ret = intel_gt_init_hw(gt); if (ret) { DRM_ERROR("Failed to initialise HW following reset (%d)\n", ret); @@ -981,8 +1024,6 @@ void intel_gt_reset(struct intel_gt *gt, if (ret) goto taint; - intel_gt_queue_hangcheck(gt); - finish: reset_finish(gt, awake); unlock: @@ -1149,7 +1190,7 @@ void intel_gt_handle_error(struct intel_gt *gt, * isn't the case at least when we get here by doing a * simulated reset via debugfs, so get an RPM reference. */ - wakeref = intel_runtime_pm_get(>->i915->runtime_pm); + wakeref = intel_runtime_pm_get(gt->uncore->rpm); engine_mask &= INTEL_INFO(gt->i915)->engine_mask; @@ -1162,8 +1203,8 @@ void intel_gt_handle_error(struct intel_gt *gt, * Try engine reset when available. We fall back to full reset if * single reset fails. */ - if (intel_has_reset_engine(gt->i915) && !intel_gt_is_wedged(gt)) { - for_each_engine_masked(engine, gt->i915, engine_mask, tmp) { + if (intel_has_reset_engine(gt) && !intel_gt_is_wedged(gt)) { + for_each_engine_masked(engine, gt, engine_mask, tmp) { BUILD_BUG_ON(I915_RESET_MODESET >= I915_RESET_ENGINE); if (test_and_set_bit(I915_RESET_ENGINE + engine->id, >->reset.flags)) @@ -1191,7 +1232,7 @@ void intel_gt_handle_error(struct intel_gt *gt, synchronize_rcu_expedited(); /* Prevent any other reset-engine attempt. */ - for_each_engine(engine, gt->i915, tmp) { + for_each_engine(engine, gt, tmp) { while (test_and_set_bit(I915_RESET_ENGINE + engine->id, >->reset.flags)) wait_on_bit(>->reset.flags, @@ -1201,7 +1242,7 @@ void intel_gt_handle_error(struct intel_gt *gt, intel_gt_reset_global(gt, engine_mask, msg); - for_each_engine(engine, gt->i915, tmp) + for_each_engine(engine, gt, tmp) clear_bit_unlock(I915_RESET_ENGINE + engine->id, >->reset.flags); clear_bit_unlock(I915_RESET_BACKOFF, >->reset.flags); @@ -1209,7 +1250,7 @@ void intel_gt_handle_error(struct intel_gt *gt, wake_up_all(>->reset.queue); out: - intel_runtime_pm_put(>->i915->runtime_pm, wakeref); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); } int intel_gt_reset_trylock(struct intel_gt *gt, int *srcu) @@ -1251,10 +1292,6 @@ int intel_gt_terminally_wedged(struct intel_gt *gt) if (!test_bit(I915_RESET_BACKOFF, >->reset.flags)) return -EIO; - /* XXX intel_reset_finish() still takes struct_mutex!!! */ - if (mutex_is_locked(>->i915->drm.struct_mutex)) - return -EAGAIN; - if (wait_event_interruptible(gt->reset.queue, !test_bit(I915_RESET_BACKOFF, >->reset.flags))) @@ -1263,6 +1300,14 @@ int intel_gt_terminally_wedged(struct intel_gt *gt) return intel_gt_is_wedged(gt) ? 
-EIO : 0; } +void intel_gt_set_wedged_on_init(struct intel_gt *gt) +{ + BUILD_BUG_ON(I915_RESET_ENGINE + I915_NUM_ENGINES > + I915_WEDGED_ON_INIT); + intel_gt_set_wedged(gt); + set_bit(I915_WEDGED_ON_INIT, >->reset.flags); +} + void intel_gt_init_reset(struct intel_gt *gt) { init_waitqueue_head(>->reset.queue); @@ -1306,4 +1351,5 @@ void __intel_fini_wedge(struct intel_wedge_me *w) #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftest_reset.c" +#include "selftest_hangcheck.c" #endif diff --git a/drivers/gpu/drm/i915/gt/intel_reset.h b/drivers/gpu/drm/i915/gt/intel_reset.h index 52c00199e069..8e8d5f761166 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.h +++ b/drivers/gpu/drm/i915/gt/intel_reset.h @@ -14,7 +14,6 @@ #include "intel_engine_types.h" #include "intel_reset_types.h" -struct drm_i915_private; struct i915_request; struct intel_engine_cs; struct intel_gt; @@ -45,6 +44,12 @@ void intel_gt_set_wedged(struct intel_gt *gt); bool intel_gt_unset_wedged(struct intel_gt *gt); int intel_gt_terminally_wedged(struct intel_gt *gt); +/* + * There's no unset_wedged_on_init paired with this one. + * Once we're wedged on init, there's no going back. + */ +void intel_gt_set_wedged_on_init(struct intel_gt *gt); + int __intel_gt_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask); int intel_reset_guc(struct intel_gt *gt); @@ -68,10 +73,13 @@ void __intel_fini_wedge(struct intel_wedge_me *w); static inline bool __intel_reset_failed(const struct intel_reset *reset) { + GEM_BUG_ON(test_bit(I915_WEDGED_ON_INIT, &reset->flags) ? + !test_bit(I915_WEDGED, &reset->flags) : false); + return unlikely(test_bit(I915_WEDGED, &reset->flags)); } -bool intel_has_gpu_reset(struct drm_i915_private *i915); -bool intel_has_reset_engine(struct drm_i915_private *i915); +bool intel_has_gpu_reset(const struct intel_gt *gt); +bool intel_has_reset_engine(const struct intel_gt *gt); #endif /* I915_RESET_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_reset_types.h b/drivers/gpu/drm/i915/gt/intel_reset_types.h index 31968356e0c0..f43bc3a0fe4f 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset_types.h +++ b/drivers/gpu/drm/i915/gt/intel_reset_types.h @@ -29,11 +29,17 @@ struct intel_reset { * we set the #I915_WEDGED bit. Prior to command submission, e.g. * i915_request_alloc(), this bit is checked and the sequence * aborted (with -EIO reported to userspace) if set. + * + * #I915_WEDGED_ON_INIT - If we fail to initialize the GPU we can no + * longer use the GPU - similar to #I915_WEDGED bit. The difference in + * in the way we're handling "forced" unwedged (e.g. through debugfs), + * which is not allowed in case we failed to initialize. 
*/ unsigned long flags; #define I915_RESET_BACKOFF 0 #define I915_RESET_MODESET 1 #define I915_RESET_ENGINE 2 +#define I915_WEDGED_ON_INIT (BITS_PER_LONG - 2) #define I915_WEDGED (BITS_PER_LONG - 1) struct mutex mutex; /* serialises wedging/unwedging */ diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c new file mode 100644 index 000000000000..ece20504d240 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -0,0 +1,323 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "gem/i915_gem_object.h" +#include "i915_drv.h" +#include "i915_vma.h" +#include "intel_engine.h" +#include "intel_ring.h" +#include "intel_timeline.h" + +unsigned int intel_ring_update_space(struct intel_ring *ring) +{ + unsigned int space; + + space = __intel_ring_space(ring->head, ring->emit, ring->size); + + ring->space = space; + return space; +} + +int intel_ring_pin(struct intel_ring *ring) +{ + struct i915_vma *vma = ring->vma; + unsigned int flags; + void *addr; + int ret; + + if (atomic_fetch_inc(&ring->pin_count)) + return 0; + + flags = PIN_GLOBAL; + + /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ + flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); + + if (vma->obj->stolen) + flags |= PIN_MAPPABLE; + else + flags |= PIN_HIGH; + + ret = i915_vma_pin(vma, 0, 0, flags); + if (unlikely(ret)) + goto err_unpin; + + if (i915_vma_is_map_and_fenceable(vma)) + addr = (void __force *)i915_vma_pin_iomap(vma); + else + addr = i915_gem_object_pin_map(vma->obj, + i915_coherent_map_type(vma->vm->i915)); + if (IS_ERR(addr)) { + ret = PTR_ERR(addr); + goto err_ring; + } + + i915_vma_make_unshrinkable(vma); + + GEM_BUG_ON(ring->vaddr); + ring->vaddr = addr; + + return 0; + +err_ring: + i915_vma_unpin(vma); +err_unpin: + atomic_dec(&ring->pin_count); + return ret; +} + +void intel_ring_reset(struct intel_ring *ring, u32 tail) +{ + tail = intel_ring_wrap(ring, tail); + ring->tail = tail; + ring->head = tail; + ring->emit = tail; + intel_ring_update_space(ring); +} + +void intel_ring_unpin(struct intel_ring *ring) +{ + struct i915_vma *vma = ring->vma; + + if (!atomic_dec_and_test(&ring->pin_count)) + return; + + /* Discard any unused bytes beyond that submitted to hw. */ + intel_ring_reset(ring, ring->emit); + + i915_vma_unset_ggtt_write(vma); + if (i915_vma_is_map_and_fenceable(vma)) + i915_vma_unpin_iomap(vma); + else + i915_gem_object_unpin_map(vma->obj); + + GEM_BUG_ON(!ring->vaddr); + ring->vaddr = NULL; + + i915_vma_unpin(vma); + i915_vma_make_purgeable(vma); +} + +static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) +{ + struct i915_address_space *vm = &ggtt->vm; + struct drm_i915_private *i915 = vm->i915; + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + + obj = ERR_PTR(-ENODEV); + if (i915_ggtt_has_aperture(ggtt)) + obj = i915_gem_object_create_stolen(i915, size); + if (IS_ERR(obj)) + obj = i915_gem_object_create_internal(i915, size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + /* + * Mark ring buffers as read-only from GPU side (so no stray overwrites) + * if supported by the platform's GGTT. 
+ */ + if (vm->has_read_only) + i915_gem_object_set_readonly(obj); + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) + goto err; + + return vma; + +err: + i915_gem_object_put(obj); + return vma; +} + +struct intel_ring * +intel_engine_create_ring(struct intel_engine_cs *engine, int size) +{ + struct drm_i915_private *i915 = engine->i915; + struct intel_ring *ring; + struct i915_vma *vma; + + GEM_BUG_ON(!is_power_of_2(size)); + GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES); + + ring = kzalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + return ERR_PTR(-ENOMEM); + + kref_init(&ring->ref); + ring->size = size; + + /* + * Workaround an erratum on the i830 which causes a hang if + * the TAIL pointer points to within the last 2 cachelines + * of the buffer. + */ + ring->effective_size = size; + if (IS_I830(i915) || IS_I845G(i915)) + ring->effective_size -= 2 * CACHELINE_BYTES; + + intel_ring_update_space(ring); + + vma = create_ring_vma(engine->gt->ggtt, size); + if (IS_ERR(vma)) { + kfree(ring); + return ERR_CAST(vma); + } + ring->vma = vma; + + return ring; +} + +void intel_ring_free(struct kref *ref) +{ + struct intel_ring *ring = container_of(ref, typeof(*ring), ref); + + i915_vma_put(ring->vma); + kfree(ring); +} + +static noinline int +wait_for_space(struct intel_ring *ring, + struct intel_timeline *tl, + unsigned int bytes) +{ + struct i915_request *target; + long timeout; + + if (intel_ring_update_space(ring) >= bytes) + return 0; + + GEM_BUG_ON(list_empty(&tl->requests)); + list_for_each_entry(target, &tl->requests, link) { + if (target->ring != ring) + continue; + + /* Would completion of this request free enough space? */ + if (bytes <= __intel_ring_space(target->postfix, + ring->emit, ring->size)) + break; + } + + if (GEM_WARN_ON(&target->link == &tl->requests)) + return -ENOSPC; + + timeout = i915_request_wait(target, + I915_WAIT_INTERRUPTIBLE, + MAX_SCHEDULE_TIMEOUT); + if (timeout < 0) + return timeout; + + i915_request_retire_upto(target); + + intel_ring_update_space(ring); + GEM_BUG_ON(ring->space < bytes); + return 0; +} + +u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) +{ + struct intel_ring *ring = rq->ring; + const unsigned int remain_usable = ring->effective_size - ring->emit; + const unsigned int bytes = num_dwords * sizeof(u32); + unsigned int need_wrap = 0; + unsigned int total_bytes; + u32 *cs; + + /* Packets must be qword aligned. */ + GEM_BUG_ON(num_dwords & 1); + + total_bytes = bytes + rq->reserved_space; + GEM_BUG_ON(total_bytes > ring->effective_size); + + if (unlikely(total_bytes > remain_usable)) { + const int remain_actual = ring->size - ring->emit; + + if (bytes > remain_usable) { + /* + * Not enough space for the basic request. So need to + * flush out the remainder and then wait for + * base + reserved. + */ + total_bytes += remain_actual; + need_wrap = remain_actual | 1; + } else { + /* + * The base request will fit but the reserved space + * falls off the end. So we don't need an immediate + * wrap and only need to effectively wait for the + * reserved size from the start of ringbuffer. + */ + total_bytes = rq->reserved_space + remain_actual; + } + } + + if (unlikely(total_bytes > ring->space)) { + int ret; + + /* + * Space is reserved in the ringbuffer for finalising the + * request, as that cannot be allowed to fail. During request + * finalisation, reserved_space is set to 0 to stop the + * overallocation and the assumption is that then we never need + * to wait (which has the risk of failing with EINTR). 
+ * + * See also i915_request_alloc() and i915_request_add(). + */ + GEM_BUG_ON(!rq->reserved_space); + + ret = wait_for_space(ring, + i915_request_timeline(rq), + total_bytes); + if (unlikely(ret)) + return ERR_PTR(ret); + } + + if (unlikely(need_wrap)) { + need_wrap &= ~1; + GEM_BUG_ON(need_wrap > ring->space); + GEM_BUG_ON(ring->emit + need_wrap > ring->size); + GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64))); + + /* Fill the tail with MI_NOOP */ + memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64)); + ring->space -= need_wrap; + ring->emit = 0; + } + + GEM_BUG_ON(ring->emit > ring->size - bytes); + GEM_BUG_ON(ring->space < bytes); + cs = ring->vaddr + ring->emit; + GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs))); + ring->emit += bytes; + ring->space -= bytes; + + return cs; +} + +/* Align the ring tail to a cacheline boundary */ +int intel_ring_cacheline_align(struct i915_request *rq) +{ + int num_dwords; + void *cs; + + num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32); + if (num_dwords == 0) + return 0; + + num_dwords = CACHELINE_DWORDS - num_dwords; + GEM_BUG_ON(num_dwords & 1); + + cs = intel_ring_begin(rq, num_dwords); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2); + intel_ring_advance(rq, cs + num_dwords); + + GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1)); + return 0; +} diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h new file mode 100644 index 000000000000..ea2839d9e044 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ring.h @@ -0,0 +1,131 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RING_H +#define INTEL_RING_H + +#include "i915_gem.h" /* GEM_BUG_ON */ +#include "i915_request.h" +#include "intel_ring_types.h" + +struct intel_engine_cs; + +struct intel_ring * +intel_engine_create_ring(struct intel_engine_cs *engine, int size); + +u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords); +int intel_ring_cacheline_align(struct i915_request *rq); + +unsigned int intel_ring_update_space(struct intel_ring *ring); + +int intel_ring_pin(struct intel_ring *ring); +void intel_ring_unpin(struct intel_ring *ring); +void intel_ring_reset(struct intel_ring *ring, u32 tail); + +void intel_ring_free(struct kref *ref); + +static inline struct intel_ring *intel_ring_get(struct intel_ring *ring) +{ + kref_get(&ring->ref); + return ring; +} + +static inline void intel_ring_put(struct intel_ring *ring) +{ + kref_put(&ring->ref, intel_ring_free); +} + +static inline void intel_ring_advance(struct i915_request *rq, u32 *cs) +{ + /* Dummy function. + * + * This serves as a placeholder in the code so that the reader + * can compare against the preceding intel_ring_begin() and + * check that the number of dwords emitted matches the space + * reserved for the command packet (i.e. the value passed to + * intel_ring_begin()). 
+ */ + GEM_BUG_ON((rq->ring->vaddr + rq->ring->emit) != cs); +} + +static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos) +{ + return pos & (ring->size - 1); +} + +static inline bool +intel_ring_offset_valid(const struct intel_ring *ring, + unsigned int pos) +{ + if (pos & -ring->size) /* must be strictly within the ring */ + return false; + + if (!IS_ALIGNED(pos, 8)) /* must be qword aligned */ + return false; + + return true; +} + +static inline u32 intel_ring_offset(const struct i915_request *rq, void *addr) +{ + /* Don't write ring->size (equivalent to 0) as that hangs some GPUs. */ + u32 offset = addr - rq->ring->vaddr; + GEM_BUG_ON(offset > rq->ring->size); + return intel_ring_wrap(rq->ring, offset); +} + +static inline void +assert_ring_tail_valid(const struct intel_ring *ring, unsigned int tail) +{ + GEM_BUG_ON(!intel_ring_offset_valid(ring, tail)); + + /* + * "Ring Buffer Use" + * Gen2 BSpec "1. Programming Environment" / 1.4.4.6 + * Gen3 BSpec "1c Memory Interface Functions" / 2.3.4.5 + * Gen4+ BSpec "1c Memory Interface and Command Stream" / 5.3.4.5 + * "If the Ring Buffer Head Pointer and the Tail Pointer are on the + * same cacheline, the Head Pointer must not be greater than the Tail + * Pointer." + * + * We use ring->head as the last known location of the actual RING_HEAD, + * it may have advanced but in the worst case it is equally the same + * as ring->head and so we should never program RING_TAIL to advance + * into the same cacheline as ring->head. + */ +#define cacheline(a) round_down(a, CACHELINE_BYTES) + GEM_BUG_ON(cacheline(tail) == cacheline(ring->head) && + tail < ring->head); +#undef cacheline +} + +static inline unsigned int +intel_ring_set_tail(struct intel_ring *ring, unsigned int tail) +{ + /* Whilst writes to the tail are strictly order, there is no + * serialisation between readers and the writers. The tail may be + * read by i915_request_retire() just as it is being updated + * by execlists, as although the breadcrumb is complete, the context + * switch hasn't been seen. + */ + assert_ring_tail_valid(ring, tail); + ring->tail = tail; + return tail; +} + +static inline unsigned int +__intel_ring_space(unsigned int head, unsigned int tail, unsigned int size) +{ + /* + * "If the Ring Buffer Head Pointer and the Tail Pointer are on the + * same cacheline, the Head Pointer must not be greater than the Tail + * Pointer." 
+ */ + GEM_BUG_ON(!is_power_of_2(size)); + return (head - tail - CACHELINE_BYTES) & (size - 1); +} + +#endif /* INTEL_RING_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index bacaa7bb8c9a..a47d5a7c32c9 100644 --- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -40,6 +40,7 @@ #include "intel_gt_irq.h" #include "intel_gt_pm_irq.h" #include "intel_reset.h" +#include "intel_ring.h" #include "intel_workarounds.h" /* Rough estimate of the typical request size, performing a flush, @@ -47,16 +48,6 @@ */ #define LEGACY_REQUEST_SIZE 200 -unsigned int intel_ring_update_space(struct intel_ring *ring) -{ - unsigned int space; - - space = __intel_ring_space(ring->head, ring->emit, ring->size); - - ring->space = space; - return space; -} - static int gen2_render_ring_flush(struct i915_request *rq, u32 mode) { @@ -322,7 +313,8 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) PIPE_CONTROL_DC_FLUSH_ENABLE | PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL); - *cs++ = rq->timeline->hwsp_offset | PIPE_CONTROL_GLOBAL_GTT; + *cs++ = i915_request_active_timeline(rq)->hwsp_offset | + PIPE_CONTROL_GLOBAL_GTT; *cs++ = rq->fence.seqno; *cs++ = MI_USER_INTERRUPT; @@ -425,7 +417,7 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL); - *cs++ = rq->timeline->hwsp_offset; + *cs++ = i915_request_active_timeline(rq)->hwsp_offset; *cs++ = rq->fence.seqno; *cs++ = MI_USER_INTERRUPT; @@ -439,8 +431,8 @@ static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) { - GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); - GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; @@ -459,8 +451,8 @@ static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs) { int i; - GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); - GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX; *cs++ = I915_GEM_HWS_SEQNO_ADDR | MI_FLUSH_DW_USE_GTT; @@ -930,6 +922,7 @@ static void cancel_requests(struct intel_engine_cs *engine) static void i9xx_submit_request(struct i915_request *request) { i915_request_submit(request); + wmb(); /* paranoid flush writes out of the WCB before mmio */ ENGINE_WRITE(request->engine, RING_TAIL, intel_ring_set_tail(request->ring, request->tail)); @@ -937,8 +930,8 @@ static void i9xx_submit_request(struct i915_request *request) static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs) { - GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); - GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); + 
GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); *cs++ = MI_FLUSH; @@ -960,8 +953,8 @@ static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs) { int i; - GEM_BUG_ON(rq->timeline->hwsp_ggtt != rq->engine->status_page.vma); - GEM_BUG_ON(offset_in_page(rq->timeline->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); + GEM_BUG_ON(i915_request_active_timeline(rq)->hwsp_ggtt != rq->engine->status_page.vma); + GEM_BUG_ON(offset_in_page(i915_request_active_timeline(rq)->hwsp_offset) != I915_GEM_HWS_SEQNO_ADDR); *cs++ = MI_FLUSH; @@ -1184,167 +1177,9 @@ i915_emit_bb_start(struct i915_request *rq, return 0; } -int intel_ring_pin(struct intel_ring *ring) -{ - struct i915_vma *vma = ring->vma; - unsigned int flags; - void *addr; - int ret; - - if (atomic_fetch_inc(&ring->pin_count)) - return 0; - - flags = PIN_GLOBAL; - - /* Ring wraparound at offset 0 sometimes hangs. No idea why. */ - flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma); - - if (vma->obj->stolen) - flags |= PIN_MAPPABLE; - else - flags |= PIN_HIGH; - - ret = i915_vma_pin(vma, 0, 0, flags); - if (unlikely(ret)) - goto err_unpin; - - if (i915_vma_is_map_and_fenceable(vma)) - addr = (void __force *)i915_vma_pin_iomap(vma); - else - addr = i915_gem_object_pin_map(vma->obj, - i915_coherent_map_type(vma->vm->i915)); - if (IS_ERR(addr)) { - ret = PTR_ERR(addr); - goto err_ring; - } - - i915_vma_make_unshrinkable(vma); - - GEM_BUG_ON(ring->vaddr); - ring->vaddr = addr; - - return 0; - -err_ring: - i915_vma_unpin(vma); -err_unpin: - atomic_dec(&ring->pin_count); - return ret; -} - -void intel_ring_reset(struct intel_ring *ring, u32 tail) -{ - tail = intel_ring_wrap(ring, tail); - ring->tail = tail; - ring->head = tail; - ring->emit = tail; - intel_ring_update_space(ring); -} - -void intel_ring_unpin(struct intel_ring *ring) -{ - struct i915_vma *vma = ring->vma; - - if (!atomic_dec_and_test(&ring->pin_count)) - return; - - /* Discard any unused bytes beyond that submitted to hw. */ - intel_ring_reset(ring, ring->emit); - - i915_vma_unset_ggtt_write(vma); - if (i915_vma_is_map_and_fenceable(vma)) - i915_vma_unpin_iomap(vma); - else - i915_gem_object_unpin_map(vma->obj); - - GEM_BUG_ON(!ring->vaddr); - ring->vaddr = NULL; - - i915_vma_unpin(vma); - i915_vma_make_purgeable(vma); -} - -static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size) -{ - struct i915_address_space *vm = &ggtt->vm; - struct drm_i915_private *i915 = vm->i915; - struct drm_i915_gem_object *obj; - struct i915_vma *vma; - - obj = i915_gem_object_create_stolen(i915, size); - if (!obj) - obj = i915_gem_object_create_internal(i915, size); - if (IS_ERR(obj)) - return ERR_CAST(obj); - - /* - * Mark ring buffers as read-only from GPU side (so no stray overwrites) - * if supported by the platform's GGTT. 
- */ - if (vm->has_read_only) - i915_gem_object_set_readonly(obj); - - vma = i915_vma_instance(obj, vm, NULL); - if (IS_ERR(vma)) - goto err; - - return vma; - -err: - i915_gem_object_put(obj); - return vma; -} - -struct intel_ring * -intel_engine_create_ring(struct intel_engine_cs *engine, int size) -{ - struct drm_i915_private *i915 = engine->i915; - struct intel_ring *ring; - struct i915_vma *vma; - - GEM_BUG_ON(!is_power_of_2(size)); - GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES); - - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) - return ERR_PTR(-ENOMEM); - - kref_init(&ring->ref); - - ring->size = size; - /* Workaround an erratum on the i830 which causes a hang if - * the TAIL pointer points to within the last 2 cachelines - * of the buffer. - */ - ring->effective_size = size; - if (IS_I830(i915) || IS_I845G(i915)) - ring->effective_size -= 2 * CACHELINE_BYTES; - - intel_ring_update_space(ring); - - vma = create_ring_vma(engine->gt->ggtt, size); - if (IS_ERR(vma)) { - kfree(ring); - return ERR_CAST(vma); - } - ring->vma = vma; - - return ring; -} - -void intel_ring_free(struct kref *ref) -{ - struct intel_ring *ring = container_of(ref, typeof(*ring), ref); - - i915_vma_close(ring->vma); - i915_vma_put(ring->vma); - - kfree(ring); -} - static void __ring_context_fini(struct intel_context *ce) { - i915_gem_object_put(ce->state->obj); + i915_vma_put(ce->state); } static void ring_context_destroy(struct kref *ref) @@ -1609,7 +1444,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) struct intel_engine_cs *signaller; *cs++ = MI_LOAD_REGISTER_IMM(num_engines); - for_each_engine(signaller, i915, id) { + for_each_engine(signaller, engine->gt, id) { if (signaller == engine) continue; @@ -1663,7 +1498,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) i915_reg_t last_reg = {}; /* keep gcc quiet */ *cs++ = MI_LOAD_REGISTER_IMM(num_engines); - for_each_engine(signaller, i915, id) { + for_each_engine(signaller, engine->gt, id) { if (signaller == engine) continue; @@ -1676,7 +1511,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags) /* Insert a delay before the next switch! */ *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT; *cs++ = i915_mmio_reg_offset(last_reg); - *cs++ = intel_gt_scratch_offset(rq->engine->gt, + *cs++ = intel_gt_scratch_offset(engine->gt, INTEL_GT_SCRATCH_FIELD_DEFAULT); *cs++ = MI_NOOP; } @@ -1741,46 +1576,22 @@ static int remap_l3(struct i915_request *rq) static int switch_context(struct i915_request *rq) { - struct intel_engine_cs *engine = rq->engine; - struct i915_address_space *vm = vm_alias(rq->hw_context); - unsigned int unwind_mm = 0; - u32 hw_flags = 0; + struct intel_context *ce = rq->hw_context; + struct i915_address_space *vm = vm_alias(ce); int ret; GEM_BUG_ON(HAS_EXECLISTS(rq->i915)); if (vm) { - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); - int loops; - - /* - * Baytail takes a little more convincing that it really needs - * to reload the PD between contexts. It is not just a little - * longer, as adding more stalls after the load_pd_dir (i.e. - * adding a long loop around flush_pd_dir) is not as effective - * as reloading the PD umpteen times. 32 is derived from - * experimentation (gem_exec_parallel/fds) and has no good - * explanation. 
- */ - loops = 1; - if (engine->id == BCS0 && IS_VALLEYVIEW(engine->i915)) - loops = 32; - - do { - ret = load_pd_dir(rq, ppgtt); - if (ret) - goto err; - } while (--loops); - - if (ppgtt->pd_dirty_engines & engine->mask) { - unwind_mm = engine->mask; - ppgtt->pd_dirty_engines &= ~unwind_mm; - hw_flags = MI_FORCE_RESTORE; - } + ret = load_pd_dir(rq, i915_vm_to_ppgtt(vm)); + if (ret) + return ret; } - if (rq->hw_context->state) { - GEM_BUG_ON(engine->id != RCS0); + if (ce->state) { + u32 hw_flags; + + GEM_BUG_ON(rq->engine->id != RCS0); /* * The kernel context(s) is treated as pure scratch and is not @@ -1789,22 +1600,25 @@ static int switch_context(struct i915_request *rq) * as nothing actually executes using the kernel context; it * is purely used for flushing user contexts. */ + hw_flags = 0; if (i915_gem_context_is_kernel(rq->gem_context)) hw_flags = MI_RESTORE_INHIBIT; ret = mi_set_context(rq, hw_flags); if (ret) - goto err_mm; + return ret; } if (vm) { + struct intel_engine_cs *engine = rq->engine; + ret = engine->emit_flush(rq, EMIT_INVALIDATE); if (ret) - goto err_mm; + return ret; ret = flush_pd_dir(rq); if (ret) - goto err_mm; + return ret; /* * Not only do we need a full barrier (post-sync write) after @@ -1816,24 +1630,18 @@ static int switch_context(struct i915_request *rq) */ ret = engine->emit_flush(rq, EMIT_INVALIDATE); if (ret) - goto err_mm; + return ret; ret = engine->emit_flush(rq, EMIT_FLUSH); if (ret) - goto err_mm; + return ret; } ret = remap_l3(rq); if (ret) - goto err_mm; + return ret; return 0; - -err_mm: - if (unwind_mm) - i915_vm_to_ppgtt(vm)->pd_dirty_engines |= unwind_mm; -err: - return ret; } static int ring_request_alloc(struct i915_request *request) @@ -1841,7 +1649,7 @@ static int ring_request_alloc(struct i915_request *request) int ret; GEM_BUG_ON(!intel_context_is_pinned(request->hw_context)); - GEM_BUG_ON(request->timeline->has_initial_breadcrumb); + GEM_BUG_ON(i915_request_timeline(request)->has_initial_breadcrumb); /* * Flush enough space to reduce the likelihood of waiting after @@ -1863,146 +1671,6 @@ static int ring_request_alloc(struct i915_request *request) return 0; } -static noinline int -wait_for_space(struct intel_ring *ring, - struct intel_timeline *tl, - unsigned int bytes) -{ - struct i915_request *target; - long timeout; - - if (intel_ring_update_space(ring) >= bytes) - return 0; - - GEM_BUG_ON(list_empty(&tl->requests)); - list_for_each_entry(target, &tl->requests, link) { - if (target->ring != ring) - continue; - - /* Would completion of this request free enough space? */ - if (bytes <= __intel_ring_space(target->postfix, - ring->emit, ring->size)) - break; - } - - if (GEM_WARN_ON(&target->link == &tl->requests)) - return -ENOSPC; - - timeout = i915_request_wait(target, - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT); - if (timeout < 0) - return timeout; - - i915_request_retire_upto(target); - - intel_ring_update_space(ring); - GEM_BUG_ON(ring->space < bytes); - return 0; -} - -u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords) -{ - struct intel_ring *ring = rq->ring; - const unsigned int remain_usable = ring->effective_size - ring->emit; - const unsigned int bytes = num_dwords * sizeof(u32); - unsigned int need_wrap = 0; - unsigned int total_bytes; - u32 *cs; - - /* Packets must be qword aligned. 
*/ - GEM_BUG_ON(num_dwords & 1); - - total_bytes = bytes + rq->reserved_space; - GEM_BUG_ON(total_bytes > ring->effective_size); - - if (unlikely(total_bytes > remain_usable)) { - const int remain_actual = ring->size - ring->emit; - - if (bytes > remain_usable) { - /* - * Not enough space for the basic request. So need to - * flush out the remainder and then wait for - * base + reserved. - */ - total_bytes += remain_actual; - need_wrap = remain_actual | 1; - } else { - /* - * The base request will fit but the reserved space - * falls off the end. So we don't need an immediate - * wrap and only need to effectively wait for the - * reserved size from the start of ringbuffer. - */ - total_bytes = rq->reserved_space + remain_actual; - } - } - - if (unlikely(total_bytes > ring->space)) { - int ret; - - /* - * Space is reserved in the ringbuffer for finalising the - * request, as that cannot be allowed to fail. During request - * finalisation, reserved_space is set to 0 to stop the - * overallocation and the assumption is that then we never need - * to wait (which has the risk of failing with EINTR). - * - * See also i915_request_alloc() and i915_request_add(). - */ - GEM_BUG_ON(!rq->reserved_space); - - ret = wait_for_space(ring, rq->timeline, total_bytes); - if (unlikely(ret)) - return ERR_PTR(ret); - } - - if (unlikely(need_wrap)) { - need_wrap &= ~1; - GEM_BUG_ON(need_wrap > ring->space); - GEM_BUG_ON(ring->emit + need_wrap > ring->size); - GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64))); - - /* Fill the tail with MI_NOOP */ - memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64)); - ring->space -= need_wrap; - ring->emit = 0; - } - - GEM_BUG_ON(ring->emit > ring->size - bytes); - GEM_BUG_ON(ring->space < bytes); - cs = ring->vaddr + ring->emit; - GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs))); - ring->emit += bytes; - ring->space -= bytes; - - return cs; -} - -/* Align the ring tail to a cacheline boundary */ -int intel_ring_cacheline_align(struct i915_request *rq) -{ - int num_dwords; - void *cs; - - num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32); - if (num_dwords == 0) - return 0; - - num_dwords = CACHELINE_DWORDS - num_dwords; - GEM_BUG_ON(num_dwords & 1); - - cs = intel_ring_begin(rq, num_dwords); - if (IS_ERR(cs)) - return PTR_ERR(cs); - - memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2); - intel_ring_advance(rq, cs); - - GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1)); - return 0; -} - static void gen6_bsd_submit_request(struct i915_request *request) { struct intel_uncore *uncore = request->engine->uncore; diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h new file mode 100644 index 000000000000..d9f17f38e0cc --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h @@ -0,0 +1,51 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RING_TYPES_H +#define INTEL_RING_TYPES_H + +#include <linux/atomic.h> +#include <linux/kref.h> +#include <linux/types.h> + +/* + * Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, + * but keeps the logic simple. Indeed, the whole purpose of this macro is just + * to give some inclination as to some of the magic values used in the various + * workarounds! 
+ */ +#define CACHELINE_BYTES 64 +#define CACHELINE_DWORDS (CACHELINE_BYTES / sizeof(u32)) + +struct i915_vma; + +struct intel_ring { + struct kref ref; + struct i915_vma *vma; + void *vaddr; + + /* + * As we have two types of rings, one global to the engine used + * by ringbuffer submission and those that are exclusive to a + * context used by execlists, we have to play safe and allow + * atomic updates to the pin_count. However, the actual pinning + * of the context is either done during initialisation for + * ringbuffer submission or serialised as part of the context + * pinning for execlists, and so we do not need a mutex ourselves + * to serialise intel_ring_pin/intel_ring_unpin. + */ + atomic_t pin_count; + + u32 head; + u32 tail; + u32 emit; + + u32 space; + u32 size; + u32 effective_size; +}; + +#endif /* INTEL_RING_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c new file mode 100644 index 000000000000..20d6ee148afc --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -0,0 +1,1872 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_gt.h" +#include "intel_gt_irq.h" +#include "intel_gt_pm_irq.h" +#include "intel_rps.h" +#include "intel_sideband.h" +#include "../../../platform/x86/intel_ips.h" + +/* + * Lock protecting IPS related data structures + */ +static DEFINE_SPINLOCK(mchdev_lock); + +static struct intel_gt *rps_to_gt(struct intel_rps *rps) +{ + return container_of(rps, struct intel_gt, rps); +} + +static struct drm_i915_private *rps_to_i915(struct intel_rps *rps) +{ + return rps_to_gt(rps)->i915; +} + +static struct intel_uncore *rps_to_uncore(struct intel_rps *rps) +{ + return rps_to_gt(rps)->uncore; +} + +static u32 rps_pm_sanitize_mask(struct intel_rps *rps, u32 mask) +{ + return mask & ~rps->pm_intrmsk_mbz; +} + +static u32 rps_pm_mask(struct intel_rps *rps, u8 val) +{ + u32 mask = 0; + + /* We use UP_EI_EXPIRED interrupts for both up/down in manual mode */ + if (val > rps->min_freq_softlimit) + mask |= (GEN6_PM_RP_UP_EI_EXPIRED | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_DOWN_TIMEOUT); + + if (val < rps->max_freq_softlimit) + mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; + + mask &= rps->pm_events; + + return rps_pm_sanitize_mask(rps, ~mask); +} + +static void rps_reset_ei(struct intel_rps *rps) +{ + memset(&rps->ei, 0, sizeof(rps->ei)); +} + +static void rps_enable_interrupts(struct intel_rps *rps) +{ + struct intel_gt *gt = rps_to_gt(rps); + + rps_reset_ei(rps); + + if (IS_VALLEYVIEW(gt->i915)) + /* WaGsvRC0ResidencyMethod:vlv */ + rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED; + else + rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD | + GEN6_PM_RP_DOWN_THRESHOLD | + GEN6_PM_RP_DOWN_TIMEOUT); + + spin_lock_irq(>->irq_lock); + gen6_gt_pm_enable_irq(gt, rps->pm_events); + spin_unlock_irq(>->irq_lock); + + intel_uncore_write(gt->uncore, GEN6_PMINTRMSK, + rps_pm_mask(rps, rps->cur_freq)); +} + +static void gen6_rps_reset_interrupts(struct intel_rps *rps) +{ + gen6_gt_pm_reset_iir(rps_to_gt(rps), GEN6_PM_RPS_EVENTS); +} + +static void gen11_rps_reset_interrupts(struct intel_rps *rps) +{ + while (gen11_gt_reset_one_iir(rps_to_gt(rps), 0, GEN11_GTPM)) + ; +} + +static void rps_reset_interrupts(struct intel_rps *rps) +{ + struct intel_gt *gt = rps_to_gt(rps); + + spin_lock_irq(>->irq_lock); + if (INTEL_GEN(gt->i915) >= 11) + gen11_rps_reset_interrupts(rps); + else + gen6_rps_reset_interrupts(rps); + + rps->pm_iir = 0; + 
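	/*
	 * Annotation (not part of the upstream patch): rps->pm_iir is the
	 * software latch of PM interrupt bits handed to the RPS worker by the
	 * interrupt handler.  Clearing it here, while gt->irq_lock is still
	 * held, prevents the worker from acting on up/down-threshold events
	 * that were raised before this reset.
	 */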
spin_unlock_irq(>->irq_lock); +} + +static void rps_disable_interrupts(struct intel_rps *rps) +{ + struct intel_gt *gt = rps_to_gt(rps); + + rps->pm_events = 0; + + intel_uncore_write(gt->uncore, GEN6_PMINTRMSK, + rps_pm_sanitize_mask(rps, ~0u)); + + spin_lock_irq(>->irq_lock); + gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); + spin_unlock_irq(>->irq_lock); + + intel_synchronize_irq(gt->i915); + + /* + * Now that we will not be generating any more work, flush any + * outstanding tasks. As we are called on the RPS idle path, + * we will reset the GPU to minimum frequencies, so the current + * state of the worker can be discarded. + */ + cancel_work_sync(&rps->work); + + rps_reset_interrupts(rps); +} + +static const struct cparams { + u16 i; + u16 t; + u16 m; + u16 c; +} cparams[] = { + { 1, 1333, 301, 28664 }, + { 1, 1066, 294, 24460 }, + { 1, 800, 294, 25192 }, + { 0, 1333, 276, 27605 }, + { 0, 1066, 276, 27605 }, + { 0, 800, 231, 23784 }, +}; + +static void gen5_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + u8 fmax, fmin, fstart; + u32 rgvmodectl; + int c_m, i; + + if (i915->fsb_freq <= 3200) + c_m = 0; + else if (i915->fsb_freq <= 4800) + c_m = 1; + else + c_m = 2; + + for (i = 0; i < ARRAY_SIZE(cparams); i++) { + if (cparams[i].i == c_m && cparams[i].t == i915->mem_freq) { + rps->ips.m = cparams[i].m; + rps->ips.c = cparams[i].c; + break; + } + } + + rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); + + /* Set up min, max, and cur for interrupt handling */ + fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; + fmin = (rgvmodectl & MEMMODE_FMIN_MASK); + fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> + MEMMODE_FSTART_SHIFT; + DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", + fmax, fmin, fstart); + + rps->min_freq = fmax; + rps->max_freq = fmin; + + rps->idle_freq = rps->min_freq; + rps->cur_freq = rps->idle_freq; +} + +static unsigned long +__ips_chipset_val(struct intel_ips *ips) +{ + struct intel_uncore *uncore = + rps_to_uncore(container_of(ips, struct intel_rps, ips)); + unsigned long now = jiffies_to_msecs(jiffies), dt; + unsigned long result; + u64 total, delta; + + lockdep_assert_held(&mchdev_lock); + + /* + * Prevent division-by-zero if we are asking too fast. + * Also, we don't get interesting results if we are polling + * faster than once in 10ms, so just return the saved value + * in such cases. 
+ */ + dt = now - ips->last_time1; + if (dt <= 10) + return ips->chipset_power; + + /* FIXME: handle per-counter overflow */ + total = intel_uncore_read(uncore, DMIEC); + total += intel_uncore_read(uncore, DDREC); + total += intel_uncore_read(uncore, CSIEC); + + delta = total - ips->last_count1; + + result = div_u64(div_u64(ips->m * delta, dt) + ips->c, 10); + + ips->last_count1 = total; + ips->last_time1 = now; + + ips->chipset_power = result; + + return result; +} + +static unsigned long ips_mch_val(struct intel_uncore *uncore) +{ + unsigned int m, x, b; + u32 tsfs; + + tsfs = intel_uncore_read(uncore, TSFS); + x = intel_uncore_read8(uncore, TR1); + + b = tsfs & TSFS_INTR_MASK; + m = (tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT; + + return m * x / 127 - b; +} + +static int _pxvid_to_vd(u8 pxvid) +{ + if (pxvid == 0) + return 0; + + if (pxvid >= 8 && pxvid < 31) + pxvid = 31; + + return (pxvid + 2) * 125; +} + +static u32 pvid_to_extvid(struct drm_i915_private *i915, u8 pxvid) +{ + const int vd = _pxvid_to_vd(pxvid); + + if (INTEL_INFO(i915)->is_mobile) + return max(vd - 1125, 0); + + return vd; +} + +static void __gen5_ips_update(struct intel_ips *ips) +{ + struct intel_uncore *uncore = + rps_to_uncore(container_of(ips, struct intel_rps, ips)); + u64 now, delta, dt; + u32 count; + + lockdep_assert_held(&mchdev_lock); + + now = ktime_get_raw_ns(); + dt = now - ips->last_time2; + do_div(dt, NSEC_PER_MSEC); + + /* Don't divide by 0 */ + if (dt <= 10) + return; + + count = intel_uncore_read(uncore, GFXEC); + delta = count - ips->last_count2; + + ips->last_count2 = count; + ips->last_time2 = now; + + /* More magic constants... */ + ips->gfx_power = div_u64(delta * 1181, dt * 10); +} + +static void gen5_rps_update(struct intel_rps *rps) +{ + spin_lock_irq(&mchdev_lock); + __gen5_ips_update(&rps->ips); + spin_unlock_irq(&mchdev_lock); +} + +static bool gen5_rps_set(struct intel_rps *rps, u8 val) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u16 rgvswctl; + + lockdep_assert_held(&mchdev_lock); + + rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); + if (rgvswctl & MEMCTL_CMD_STS) { + DRM_DEBUG("gpu busy, RCS change rejected\n"); + return false; /* still busy with another command */ + } + + /* Invert the frequency bin into an ips delay */ + val = rps->max_freq - val; + val = rps->min_freq + val; + + rgvswctl = + (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | + (val << MEMCTL_FREQ_SHIFT) | + MEMCTL_SFCAVM; + intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); + intel_uncore_posting_read16(uncore, MEMSWCTL); + + rgvswctl |= MEMCTL_CMD_STS; + intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); + + return true; +} + +static unsigned long intel_pxfreq(u32 vidfreq) +{ + int div = (vidfreq & 0x3f0000) >> 16; + int post = (vidfreq & 0x3000) >> 12; + int pre = (vidfreq & 0x7); + + if (!pre) + return 0; + + return div * 133333 / (pre << post); +} + +static unsigned int init_emon(struct intel_uncore *uncore) +{ + u8 pxw[16]; + int i; + + /* Disable to program */ + intel_uncore_write(uncore, ECR, 0); + intel_uncore_posting_read(uncore, ECR); + + /* Program energy weights for various events */ + intel_uncore_write(uncore, SDEW, 0x15040d00); + intel_uncore_write(uncore, CSIEW0, 0x007f0000); + intel_uncore_write(uncore, CSIEW1, 0x1e220004); + intel_uncore_write(uncore, CSIEW2, 0x04000004); + + for (i = 0; i < 5; i++) + intel_uncore_write(uncore, PEW(i), 0); + for (i = 0; i < 3; i++) + intel_uncore_write(uncore, DEW(i), 0); + + /* Program P-state weights to account for frequency power adjustment */ + for (i = 
0; i < 16; i++) { + u32 pxvidfreq = intel_uncore_read(uncore, PXVFREQ(i)); + unsigned int freq = intel_pxfreq(pxvidfreq); + unsigned int vid = + (pxvidfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; + unsigned int val; + + val = vid * vid * freq / 1000 * 255; + val /= 127 * 127 * 900; + + pxw[i] = val; + } + /* Render standby states get 0 weight */ + pxw[14] = 0; + pxw[15] = 0; + + for (i = 0; i < 4; i++) { + intel_uncore_write(uncore, PXW(i), + pxw[i * 4 + 0] << 24 | + pxw[i * 4 + 1] << 16 | + pxw[i * 4 + 2] << 8 | + pxw[i * 4 + 3] << 0); + } + + /* Adjust magic regs to magic values (more experimental results) */ + intel_uncore_write(uncore, OGW0, 0); + intel_uncore_write(uncore, OGW1, 0); + intel_uncore_write(uncore, EG0, 0x00007f00); + intel_uncore_write(uncore, EG1, 0x0000000e); + intel_uncore_write(uncore, EG2, 0x000e0000); + intel_uncore_write(uncore, EG3, 0x68000300); + intel_uncore_write(uncore, EG4, 0x42000000); + intel_uncore_write(uncore, EG5, 0x00140031); + intel_uncore_write(uncore, EG6, 0); + intel_uncore_write(uncore, EG7, 0); + + for (i = 0; i < 8; i++) + intel_uncore_write(uncore, PXWL(i), 0); + + /* Enable PMON + select events */ + intel_uncore_write(uncore, ECR, 0x80000019); + + return intel_uncore_read(uncore, LCFUSE02) & LCFUSE_HIV_MASK; +} + +static bool gen5_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u8 fstart, vstart; + u32 rgvmodectl; + + spin_lock_irq(&mchdev_lock); + + rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); + + /* Enable temp reporting */ + intel_uncore_write16(uncore, PMMISC, + intel_uncore_read16(uncore, PMMISC) | MCPPCE_EN); + intel_uncore_write16(uncore, TSC1, + intel_uncore_read16(uncore, TSC1) | TSE); + + /* 100ms RC evaluation intervals */ + intel_uncore_write(uncore, RCUPEI, 100000); + intel_uncore_write(uncore, RCDNEI, 100000); + + /* Set max/min thresholds to 90ms and 80ms respectively */ + intel_uncore_write(uncore, RCBMAXAVG, 90000); + intel_uncore_write(uncore, RCBMINAVG, 80000); + + intel_uncore_write(uncore, MEMIHYST, 1); + + /* Set up min, max, and cur for interrupt handling */ + fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> + MEMMODE_FSTART_SHIFT; + + vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) & + PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; + + intel_uncore_write(uncore, + MEMINTREN, + MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); + + intel_uncore_write(uncore, VIDSTART, vstart); + intel_uncore_posting_read(uncore, VIDSTART); + + rgvmodectl |= MEMMODE_SWMODE_EN; + intel_uncore_write(uncore, MEMMODECTL, rgvmodectl); + + if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) & + MEMCTL_CMD_STS) == 0, 10)) + DRM_ERROR("stuck trying to change perf mode\n"); + mdelay(1); + + gen5_rps_set(rps, rps->cur_freq); + + rps->ips.last_count1 = intel_uncore_read(uncore, DMIEC); + rps->ips.last_count1 += intel_uncore_read(uncore, DDREC); + rps->ips.last_count1 += intel_uncore_read(uncore, CSIEC); + rps->ips.last_time1 = jiffies_to_msecs(jiffies); + + rps->ips.last_count2 = intel_uncore_read(uncore, GFXEC); + rps->ips.last_time2 = ktime_get_raw_ns(); + + spin_unlock_irq(&mchdev_lock); + + rps->ips.corr = init_emon(uncore); + + return true; +} + +static void gen5_rps_disable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u16 rgvswctl; + + spin_lock_irq(&mchdev_lock); + + rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); + + /* Ack interrupts, disable EFC interrupt */ + intel_uncore_write(uncore, MEMINTREN, + intel_uncore_read(uncore, MEMINTREN) & + ~MEMINT_EVAL_CHG_EN); + 
intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); + intel_uncore_write(uncore, DEIER, + intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT); + intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT); + intel_uncore_write(uncore, DEIMR, + intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT); + + /* Go back to the starting frequency */ + gen5_rps_set(rps, rps->idle_freq); + mdelay(1); + rgvswctl |= MEMCTL_CMD_STS; + intel_uncore_write(uncore, MEMSWCTL, rgvswctl); + mdelay(1); + + spin_unlock_irq(&mchdev_lock); +} + +static u32 rps_limits(struct intel_rps *rps, u8 val) +{ + u32 limits; + + /* + * Only set the down limit when we've reached the lowest level to avoid + * getting more interrupts, otherwise leave this clear. This prevents a + * race in the hw when coming out of rc6: There's a tiny window where + * the hw runs at the minimal clock before selecting the desired + * frequency, if the down threshold expires in that window we will not + * receive a down interrupt. + */ + if (INTEL_GEN(rps_to_i915(rps)) >= 9) { + limits = rps->max_freq_softlimit << 23; + if (val <= rps->min_freq_softlimit) + limits |= rps->min_freq_softlimit << 14; + } else { + limits = rps->max_freq_softlimit << 24; + if (val <= rps->min_freq_softlimit) + limits |= rps->min_freq_softlimit << 16; + } + + return limits; +} + +static void rps_set_power(struct intel_rps *rps, int new_power) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 threshold_up = 0, threshold_down = 0; /* in % */ + u32 ei_up = 0, ei_down = 0; + + lockdep_assert_held(&rps->power.mutex); + + if (new_power == rps->power.mode) + return; + + /* Note the units here are not exactly 1us, but 1280ns. */ + switch (new_power) { + case LOW_POWER: + /* Upclock if more than 95% busy over 16ms */ + ei_up = 16000; + threshold_up = 95; + + /* Downclock if less than 85% busy over 32ms */ + ei_down = 32000; + threshold_down = 85; + break; + + case BETWEEN: + /* Upclock if more than 90% busy over 13ms */ + ei_up = 13000; + threshold_up = 90; + + /* Downclock if less than 75% busy over 32ms */ + ei_down = 32000; + threshold_down = 75; + break; + + case HIGH_POWER: + /* Upclock if more than 85% busy over 10ms */ + ei_up = 10000; + threshold_up = 85; + + /* Downclock if less than 60% busy over 32ms */ + ei_down = 32000; + threshold_down = 60; + break; + } + + /* When byt can survive without system hang with dynamic + * sw freq adjustments, this restriction can be lifted. + */ + if (IS_VALLEYVIEW(i915)) + goto skip_hw_write; + + intel_uncore_write(uncore, GEN6_RP_UP_EI, + GT_INTERVAL_FROM_US(i915, ei_up)); + intel_uncore_write(uncore, GEN6_RP_UP_THRESHOLD, + GT_INTERVAL_FROM_US(i915, + ei_up * threshold_up / 100)); + + intel_uncore_write(uncore, GEN6_RP_DOWN_EI, + GT_INTERVAL_FROM_US(i915, ei_down)); + intel_uncore_write(uncore, GEN6_RP_DOWN_THRESHOLD, + GT_INTERVAL_FROM_US(i915, + ei_down * threshold_down / 100)); + + intel_uncore_write(uncore, GEN6_RP_CONTROL, + (INTEL_GEN(i915) > 9 ? 
0 : GEN6_RP_MEDIA_TURBO) | + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); + +skip_hw_write: + rps->power.mode = new_power; + rps->power.up_threshold = threshold_up; + rps->power.down_threshold = threshold_down; +} + +static void gen6_rps_set_thresholds(struct intel_rps *rps, u8 val) +{ + int new_power; + + new_power = rps->power.mode; + switch (rps->power.mode) { + case LOW_POWER: + if (val > rps->efficient_freq + 1 && + val > rps->cur_freq) + new_power = BETWEEN; + break; + + case BETWEEN: + if (val <= rps->efficient_freq && + val < rps->cur_freq) + new_power = LOW_POWER; + else if (val >= rps->rp0_freq && + val > rps->cur_freq) + new_power = HIGH_POWER; + break; + + case HIGH_POWER: + if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && + val < rps->cur_freq) + new_power = BETWEEN; + break; + } + /* Max/min bins are special */ + if (val <= rps->min_freq_softlimit) + new_power = LOW_POWER; + if (val >= rps->max_freq_softlimit) + new_power = HIGH_POWER; + + mutex_lock(&rps->power.mutex); + if (rps->power.interactive) + new_power = HIGH_POWER; + rps_set_power(rps, new_power); + mutex_unlock(&rps->power.mutex); +} + +void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive) +{ + mutex_lock(&rps->power.mutex); + if (interactive) { + if (!rps->power.interactive++ && rps->active) + rps_set_power(rps, HIGH_POWER); + } else { + GEM_BUG_ON(!rps->power.interactive); + rps->power.interactive--; + } + mutex_unlock(&rps->power.mutex); +} + +static int gen6_rps_set(struct intel_rps *rps, u8 val) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 swreq; + + if (INTEL_GEN(i915) >= 9) + swreq = GEN9_FREQUENCY(val); + else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + swreq = HSW_FREQUENCY(val); + else + swreq = (GEN6_FREQUENCY(val) | + GEN6_OFFSET(0) | + GEN6_AGGRESSIVE_TURBO); + intel_uncore_write(uncore, GEN6_RPNSWREQ, swreq); + + return 0; +} + +static int vlv_rps_set(struct intel_rps *rps, u8 val) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + int err; + + vlv_punit_get(i915); + err = vlv_punit_write(i915, PUNIT_REG_GPU_FREQ_REQ, val); + vlv_punit_put(i915); + + return err; +} + +static int rps_set(struct intel_rps *rps, u8 val) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + int err; + + if (INTEL_GEN(i915) < 6) + return 0; + + if (val == rps->last_freq) + return 0; + + if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) + err = vlv_rps_set(rps, val); + else + err = gen6_rps_set(rps, val); + if (err) + return err; + + gen6_rps_set_thresholds(rps, val); + rps->last_freq = val; + + return 0; +} + +void intel_rps_unpark(struct intel_rps *rps) +{ + u8 freq; + + if (!rps->enabled) + return; + + /* + * Use the user's desired frequency as a guide, but for better + * performance, jump directly to RPe as our starting frequency. 
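The three power modes programmed by rps_set_power() above amount to a small table of evaluation intervals and busy-percentage thresholds. A self-contained restatement of those values (numbers taken from the switch statement above; the struct and enum names here are illustrative only):

#include <stdio.h>

enum rps_power_mode { LOW_POWER, BETWEEN, HIGH_POWER };

struct rps_power_params {
        unsigned int ei_up_us;          /* up evaluation interval */
        unsigned int threshold_up;      /* % busy needed to upclock */
        unsigned int ei_down_us;        /* down evaluation interval */
        unsigned int threshold_down;    /* % busy below which to downclock */
};

/* Values copied from rps_set_power() above. */
static const struct rps_power_params params[] = {
        [LOW_POWER]  = { 16000, 95, 32000, 85 },
        [BETWEEN]    = { 13000, 90, 32000, 75 },
        [HIGH_POWER] = { 10000, 85, 32000, 60 },
};

int main(void)
{
        for (int i = LOW_POWER; i <= HIGH_POWER; i++)
                printf("mode %d: up if >%u%% busy over %u us, down if <%u%% over %u us\n",
                       i, params[i].threshold_up, params[i].ei_up_us,
                       params[i].threshold_down, params[i].ei_down_us);
        return 0;
}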
+ */ + mutex_lock(&rps->lock); + rps->active = true; + freq = max(rps->cur_freq, rps->efficient_freq), + freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit); + intel_rps_set(rps, freq); + rps->last_adj = 0; + mutex_unlock(&rps->lock); + + if (INTEL_GEN(rps_to_i915(rps)) >= 6) + rps_enable_interrupts(rps); + + if (IS_GEN(rps_to_i915(rps), 5)) + gen5_rps_update(rps); +} + +void intel_rps_park(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + if (!rps->enabled) + return; + + if (INTEL_GEN(i915) >= 6) + rps_disable_interrupts(rps); + + rps->active = false; + if (rps->last_freq <= rps->idle_freq) + return; + + /* + * The punit delays the write of the frequency and voltage until it + * determines the GPU is awake. During normal usage we don't want to + * waste power changing the frequency if the GPU is sleeping (rc6). + * However, the GPU and driver is now idle and we do not want to delay + * switching to minimum voltage (reducing power whilst idle) as we do + * not expect to be woken in the near future and so must flush the + * change by waking the device. + * + * We choose to take the media powerwell (either would do to trick the + * punit into committing the voltage change) as that takes a lot less + * power than the render powerwell. + */ + intel_uncore_forcewake_get(rps_to_uncore(rps), FORCEWAKE_MEDIA); + rps_set(rps, rps->idle_freq); + intel_uncore_forcewake_put(rps_to_uncore(rps), FORCEWAKE_MEDIA); +} + +void intel_rps_boost(struct i915_request *rq) +{ + struct intel_rps *rps = &rq->engine->gt->rps; + unsigned long flags; + + if (i915_request_signaled(rq) || !rps->active) + return; + + /* Serializes with i915_request_retire() */ + spin_lock_irqsave(&rq->lock, flags); + if (!i915_request_has_waitboost(rq) && + !dma_fence_is_signaled_locked(&rq->fence)) { + rq->flags |= I915_REQUEST_WAITBOOST; + + if (!atomic_fetch_inc(&rps->num_waiters) && + READ_ONCE(rps->cur_freq) < rps->boost_freq) + schedule_work(&rps->work); + + atomic_inc(&rps->boosts); + } + spin_unlock_irqrestore(&rq->lock, flags); +} + +int intel_rps_set(struct intel_rps *rps, u8 val) +{ + int err = 0; + + lockdep_assert_held(&rps->lock); + GEM_BUG_ON(val > rps->max_freq); + GEM_BUG_ON(val < rps->min_freq); + + if (rps->active) { + err = rps_set(rps, val); + + /* + * Make sure we continue to get interrupts + * until we hit the minimum or maximum frequencies. 
+ */ + if (INTEL_GEN(rps_to_i915(rps)) >= 6) { + struct intel_uncore *uncore = rps_to_uncore(rps); + + intel_uncore_write(uncore, GEN6_RP_INTERRUPT_LIMITS, + rps_limits(rps, val)); + + intel_uncore_write(uncore, GEN6_PMINTRMSK, + rps_pm_mask(rps, val)); + } + } + + if (err == 0) + rps->cur_freq = val; + + return err; +} + +static void gen6_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + + /* All of these values are in units of 50MHz */ + + /* static values from HW: RP0 > RP1 > RPn (min_freq) */ + if (IS_GEN9_LP(i915)) { + u32 rp_state_cap = intel_uncore_read(uncore, BXT_RP_STATE_CAP); + + rps->rp0_freq = (rp_state_cap >> 16) & 0xff; + rps->rp1_freq = (rp_state_cap >> 8) & 0xff; + rps->min_freq = (rp_state_cap >> 0) & 0xff; + } else { + u32 rp_state_cap = intel_uncore_read(uncore, GEN6_RP_STATE_CAP); + + rps->rp0_freq = (rp_state_cap >> 0) & 0xff; + rps->rp1_freq = (rp_state_cap >> 8) & 0xff; + rps->min_freq = (rp_state_cap >> 16) & 0xff; + } + + /* hw_max = RP0 until we check for overclocking */ + rps->max_freq = rps->rp0_freq; + + rps->efficient_freq = rps->rp1_freq; + if (IS_HASWELL(i915) || IS_BROADWELL(i915) || + IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { + u32 ddcc_status = 0; + + if (sandybridge_pcode_read(i915, + HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, + &ddcc_status, NULL) == 0) + rps->efficient_freq = + clamp_t(u8, + (ddcc_status >> 8) & 0xff, + rps->min_freq, + rps->max_freq); + } + + if (IS_GEN9_BC(i915) || INTEL_GEN(i915) >= 10) { + /* Store the frequency values in 16.66 MHZ units, which is + * the natural hardware unit for SKL + */ + rps->rp0_freq *= GEN9_FREQ_SCALER; + rps->rp1_freq *= GEN9_FREQ_SCALER; + rps->min_freq *= GEN9_FREQ_SCALER; + rps->max_freq *= GEN9_FREQ_SCALER; + rps->efficient_freq *= GEN9_FREQ_SCALER; + } +} + +static bool rps_reset(struct intel_rps *rps) +{ + /* force a reset */ + rps->power.mode = -1; + rps->last_freq = -1; + + if (rps_set(rps, rps->min_freq)) { + DRM_ERROR("Failed to reset RPS to initial values\n"); + return false; + } + + rps->cur_freq = rps->min_freq; + return true; +} + +/* See the Gen9_GT_PM_Programming_Guide doc for the below */ +static bool gen9_rps_enable(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + + /* Program defaults and thresholds for RPS */ + if (IS_GEN(i915, 9)) + intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, + GEN9_FREQUENCY(rps->rp1_freq)); + + /* 1 second timeout */ + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, + GT_INTERVAL_FROM_US(i915, 1000000)); + + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 0xa); + + return rps_reset(rps); +} + +static bool gen8_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + + intel_uncore_write_fw(uncore, GEN6_RC_VIDEO_FREQ, + HSW_FREQUENCY(rps->rp1_freq)); + + /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, + 100000000 / 128); /* 1 second timeout */ + + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + + return rps_reset(rps); +} + +static bool gen6_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + + /* Power down if completely idle for over 50ms */ + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 50000); + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + + return rps_reset(rps); +} + +static int chv_rps_max_freq(struct 
intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); + + switch (RUNTIME_INFO(i915)->sseu.eu_total) { + case 8: + /* (2 * 4) config */ + val >>= FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT; + break; + case 12: + /* (2 * 6) config */ + val >>= FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT; + break; + case 16: + /* (2 * 8) config */ + default: + /* Setting (2 * 8) Min RP0 for any other combination */ + val >>= FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT; + break; + } + + return val & FB_GFX_FREQ_FUSE_MASK; +} + +static int chv_rps_rpe_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, PUNIT_GPU_DUTYCYCLE_REG); + val >>= PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT; + + return val & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; +} + +static int chv_rps_guar_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, FB_GFX_FMAX_AT_VMAX_FUSE); + + return val & FB_GFX_FREQ_FUSE_MASK; +} + +static u32 chv_rps_min_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, FB_GFX_FMIN_AT_VMIN_FUSE); + val >>= FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT; + + return val & FB_GFX_FREQ_FUSE_MASK; +} + +static bool chv_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + /* 1: Program defaults and thresholds for RPS*/ + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); + intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); + intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); + + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + + /* 2: Enable RPS */ + intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); + + /* Setting Fixed Bias */ + vlv_punit_get(i915); + + val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50; + vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); + + val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + + vlv_punit_put(i915); + + /* RPS code assumes GPLL is used */ + WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); + + DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); + DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); + + return rps_reset(rps); +} + +static int vlv_rps_guar_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val, rp1; + + val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); + + rp1 = val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK; + rp1 >>= FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; + + return rp1; +} + +static int vlv_rps_max_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val, rp0; + + val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FREQ_FUSE); + + rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; + /* Clamp to max */ + rp0 = min_t(u32, rp0, 0xea); + + return rp0; +} + +static int vlv_rps_rpe_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val, rpe; + + val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_LO); + rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; + val = vlv_nc_read(i915, IOSF_NC_FB_GFX_FMAX_FUSE_HI); + rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; + + return rpe; +} + +static int vlv_rps_min_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + val = vlv_punit_read(i915, PUNIT_REG_GPU_LFM) & 0xff; + /* + * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value + * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on + * a BYT-M B0 the above register contains 0xbf. Moreover when setting + * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 + * to make sure it matches what Punit accepts. + */ + return max_t(u32, val, 0xc0); +} + +static bool vlv_rps_enable(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_TIMEOUT, 1000000); + intel_uncore_write_fw(uncore, GEN6_RP_UP_THRESHOLD, 59400); + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_THRESHOLD, 245000); + intel_uncore_write_fw(uncore, GEN6_RP_UP_EI, 66000); + intel_uncore_write_fw(uncore, GEN6_RP_DOWN_EI, 350000); + + intel_uncore_write_fw(uncore, GEN6_RP_IDLE_HYSTERSIS, 10); + + intel_uncore_write_fw(uncore, GEN6_RP_CONTROL, + GEN6_RP_MEDIA_TURBO | + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_CONT); + + vlv_punit_get(i915); + + /* Setting Fixed Bias */ + val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875; + vlv_punit_write(i915, VLV_TURBO_SOC_OVERRIDE, val); + + val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + + vlv_punit_put(i915); + + /* RPS code assumes GPLL is used */ + WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); + + DRM_DEBUG_DRIVER("GPLL enabled? 
%s\n", yesno(val & GPLLENABLE)); + DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); + + return rps_reset(rps); +} + +static unsigned long __ips_gfx_val(struct intel_ips *ips) +{ + struct intel_rps *rps = container_of(ips, typeof(*rps), ips); + struct intel_uncore *uncore = rps_to_uncore(rps); + unsigned long t, corr, state1, corr2, state2; + u32 pxvid, ext_v; + + lockdep_assert_held(&mchdev_lock); + + pxvid = intel_uncore_read(uncore, PXVFREQ(rps->cur_freq)); + pxvid = (pxvid >> 24) & 0x7f; + ext_v = pvid_to_extvid(rps_to_i915(rps), pxvid); + + state1 = ext_v; + + /* Revel in the empirically derived constants */ + + /* Correction factor in 1/100000 units */ + t = ips_mch_val(uncore); + if (t > 80) + corr = t * 2349 + 135940; + else if (t >= 50) + corr = t * 964 + 29317; + else /* < 50 */ + corr = t * 301 + 1004; + + corr = corr * 150142 * state1 / 10000 - 78642; + corr /= 100000; + corr2 = corr * ips->corr; + + state2 = corr2 * state1 / 10000; + state2 /= 100; /* convert to mW */ + + __gen5_ips_update(ips); + + return ips->gfx_power + state2; +} + +void intel_rps_enable(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + struct intel_uncore *uncore = rps_to_uncore(rps); + + intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); + if (IS_CHERRYVIEW(i915)) + rps->enabled = chv_rps_enable(rps); + else if (IS_VALLEYVIEW(i915)) + rps->enabled = vlv_rps_enable(rps); + else if (INTEL_GEN(i915) >= 9) + rps->enabled = gen9_rps_enable(rps); + else if (INTEL_GEN(i915) >= 8) + rps->enabled = gen8_rps_enable(rps); + else if (INTEL_GEN(i915) >= 6) + rps->enabled = gen6_rps_enable(rps); + else if (IS_IRONLAKE_M(i915)) + rps->enabled = gen5_rps_enable(rps); + intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); + if (!rps->enabled) + return; + + WARN_ON(rps->max_freq < rps->min_freq); + WARN_ON(rps->idle_freq > rps->max_freq); + + WARN_ON(rps->efficient_freq < rps->min_freq); + WARN_ON(rps->efficient_freq > rps->max_freq); +} + +static void gen6_rps_disable(struct intel_rps *rps) +{ + intel_uncore_write(rps_to_uncore(rps), GEN6_RP_CONTROL, 0); +} + +void intel_rps_disable(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + rps->enabled = false; + + if (INTEL_GEN(i915) >= 6) + gen6_rps_disable(rps); + else if (IS_IRONLAKE_M(i915)) + gen5_rps_disable(rps); +} + +static int byt_gpu_freq(struct intel_rps *rps, int val) +{ + /* + * N = val - 0xb7 + * Slow = Fast = GPLL ref * N + */ + return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000); +} + +static int byt_freq_opcode(struct intel_rps *rps, int val) +{ + return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7; +} + +static int chv_gpu_freq(struct intel_rps *rps, int val) +{ + /* + * N = val / 2 + * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2 + */ + return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000); +} + +static int chv_freq_opcode(struct intel_rps *rps, int val) +{ + /* CHV needs even values */ + return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2; +} + +int intel_gpu_freq(struct intel_rps *rps, int val) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + if (INTEL_GEN(i915) >= 9) + return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, + GEN9_FREQ_SCALER); + else if (IS_CHERRYVIEW(i915)) + return chv_gpu_freq(rps, val); + else if (IS_VALLEYVIEW(i915)) + return byt_gpu_freq(rps, val); + else + return val * GT_FREQUENCY_MULTIPLIER; +} + +int intel_freq_opcode(struct intel_rps *rps, int val) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + if 
(INTEL_GEN(i915) >= 9) + return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, + GT_FREQUENCY_MULTIPLIER); + else if (IS_CHERRYVIEW(i915)) + return chv_freq_opcode(rps, val); + else if (IS_VALLEYVIEW(i915)) + return byt_freq_opcode(rps, val); + else + return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); +} + +static void vlv_init_gpll_ref_freq(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + rps->gpll_ref_freq = + vlv_get_cck_clock(i915, "GPLL ref", + CCK_GPLL_CLOCK_CONTROL, + i915->czclk_freq); + + DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", rps->gpll_ref_freq); +} + +static void vlv_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + vlv_iosf_sb_get(i915, + BIT(VLV_IOSF_SB_PUNIT) | + BIT(VLV_IOSF_SB_NC) | + BIT(VLV_IOSF_SB_CCK)); + + vlv_init_gpll_ref_freq(rps); + + val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS); + switch ((val >> 6) & 3) { + case 0: + case 1: + i915->mem_freq = 800; + break; + case 2: + i915->mem_freq = 1066; + break; + case 3: + i915->mem_freq = 1333; + break; + } + DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq); + + rps->max_freq = vlv_rps_max_freq(rps); + rps->rp0_freq = rps->max_freq; + DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->max_freq), + rps->max_freq); + + rps->efficient_freq = vlv_rps_rpe_freq(rps); + DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->efficient_freq), + rps->efficient_freq); + + rps->rp1_freq = vlv_rps_guar_freq(rps); + DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->rp1_freq), + rps->rp1_freq); + + rps->min_freq = vlv_rps_min_freq(rps); + DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->min_freq), + rps->min_freq); + + vlv_iosf_sb_put(i915, + BIT(VLV_IOSF_SB_PUNIT) | + BIT(VLV_IOSF_SB_NC) | + BIT(VLV_IOSF_SB_CCK)); +} + +static void chv_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 val; + + vlv_iosf_sb_get(i915, + BIT(VLV_IOSF_SB_PUNIT) | + BIT(VLV_IOSF_SB_NC) | + BIT(VLV_IOSF_SB_CCK)); + + vlv_init_gpll_ref_freq(rps); + + val = vlv_cck_read(i915, CCK_FUSE_REG); + + switch ((val >> 2) & 0x7) { + case 3: + i915->mem_freq = 2000; + break; + default: + i915->mem_freq = 1600; + break; + } + DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", i915->mem_freq); + + rps->max_freq = chv_rps_max_freq(rps); + rps->rp0_freq = rps->max_freq; + DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->max_freq), + rps->max_freq); + + rps->efficient_freq = chv_rps_rpe_freq(rps); + DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->efficient_freq), + rps->efficient_freq); + + rps->rp1_freq = chv_rps_guar_freq(rps); + DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->rp1_freq), + rps->rp1_freq); + + rps->min_freq = chv_rps_min_freq(rps); + DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", + intel_gpu_freq(rps, rps->min_freq), + rps->min_freq); + + vlv_iosf_sb_put(i915, + BIT(VLV_IOSF_SB_PUNIT) | + BIT(VLV_IOSF_SB_NC) | + BIT(VLV_IOSF_SB_CCK)); + + WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq | + rps->min_freq) & 1, + "Odd GPU freq values\n"); +} + +static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei) +{ + ei->ktime = ktime_get_raw(); + ei->render_c0 = intel_uncore_read(uncore, VLV_RENDER_C0_COUNT); + ei->media_c0 = intel_uncore_read(uncore, VLV_MEDIA_C0_COUNT); +} + +static u32 vlv_wa_c0_ei(struct 
intel_rps *rps, u32 pm_iir) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + const struct intel_rps_ei *prev = &rps->ei; + struct intel_rps_ei now; + u32 events = 0; + + if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) + return 0; + + vlv_c0_read(uncore, &now); + + if (prev->ktime) { + u64 time, c0; + u32 render, media; + + time = ktime_us_delta(now.ktime, prev->ktime); + + time *= rps_to_i915(rps)->czclk_freq; + + /* Workload can be split between render + media, + * e.g. SwapBuffers being blitted in X after being rendered in + * mesa. To account for this we need to combine both engines + * into our activity counter. + */ + render = now.render_c0 - prev->render_c0; + media = now.media_c0 - prev->media_c0; + c0 = max(render, media); + c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ + + if (c0 > time * rps->power.up_threshold) + events = GEN6_PM_RP_UP_THRESHOLD; + else if (c0 < time * rps->power.down_threshold) + events = GEN6_PM_RP_DOWN_THRESHOLD; + } + + rps->ei = now; + return events; +} + +static void rps_work(struct work_struct *work) +{ + struct intel_rps *rps = container_of(work, typeof(*rps), work); + struct intel_gt *gt = rps_to_gt(rps); + bool client_boost = false; + int new_freq, adj, min, max; + u32 pm_iir = 0; + + spin_lock_irq(&gt->irq_lock); + pm_iir = fetch_and_zero(&rps->pm_iir); + client_boost = atomic_read(&rps->num_waiters); + spin_unlock_irq(&gt->irq_lock); + + /* Make sure we didn't queue anything we're not going to process. */ + if ((pm_iir & rps->pm_events) == 0 && !client_boost) + goto out; + + mutex_lock(&rps->lock); + + pm_iir |= vlv_wa_c0_ei(rps, pm_iir); + + adj = rps->last_adj; + new_freq = rps->cur_freq; + min = rps->min_freq_softlimit; + max = rps->max_freq_softlimit; + if (client_boost) + max = rps->max_freq; + if (client_boost && new_freq < rps->boost_freq) { + new_freq = rps->boost_freq; + adj = 0; + } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { + if (adj > 0) + adj *= 2; + else /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(gt->i915) ? 2 : 1; + + if (new_freq >= rps->max_freq_softlimit) + adj = 0; + } else if (client_boost) { + adj = 0; + } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { + if (rps->cur_freq > rps->efficient_freq) + new_freq = rps->efficient_freq; + else if (rps->cur_freq > rps->min_freq_softlimit) + new_freq = rps->min_freq_softlimit; + adj = 0; + } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { + if (adj < 0) + adj *= 2; + else /* CHV needs even encode values */ + adj = IS_CHERRYVIEW(gt->i915) ? -2 : -1; + + if (new_freq <= rps->min_freq_softlimit) + adj = 0; + } else { /* unknown event */ + adj = 0; + } + + rps->last_adj = adj; + + /* + * Limit deboosting and boosting to keep ourselves at the extremes + * when in the respective power modes (i.e. slowly decrease frequencies + * while in the HIGH_POWER zone and slowly increase frequencies while + * in the LOW_POWER zone). On idle, we will hit the timeout and drop + * to the next level quickly, and conversely if busy we expect to + * hit a waitboost and rapidly switch into max power.
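vlv_wa_c0_ei() above turns the render and media C0 residency counters into synthetic up/down events by comparing the busier of the two engines against the current power mode's thresholds. A simplified, standalone sketch of that decision follows; it deliberately ignores the driver's czclk and fixed-point scaling, and the function and constant names are illustrative:

#include <stdint.h>
#include <stdio.h>

#define EVENT_NONE 0
#define EVENT_UP   1
#define EVENT_DOWN 2

/* Simplified form of the vlv_wa_c0_ei() decision: take the larger of the
 * render/media C0 deltas (already expressed as busy microseconds here) and
 * compare the resulting busy percentage against the mode's thresholds. */
static int c0_event(uint64_t render_c0_us, uint64_t media_c0_us,
                    uint64_t elapsed_us,
                    unsigned int up_threshold, unsigned int down_threshold)
{
        uint64_t busy = render_c0_us > media_c0_us ? render_c0_us : media_c0_us;
        uint64_t busy_pct = elapsed_us ? 100 * busy / elapsed_us : 0;

        if (busy_pct > up_threshold)
                return EVENT_UP;
        if (busy_pct < down_threshold)
                return EVENT_DOWN;
        return EVENT_NONE;
}

int main(void)
{
        /* e.g. 15.6 ms of render C0 over a 16 ms window, with the LOW_POWER
         * thresholds (95% up / 85% down), requests an upclock (prints 1). */
        printf("%d\n", c0_event(15600, 4000, 16000, 95, 85));
        return 0;
}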
+ */ + if ((adj < 0 && rps->power.mode == HIGH_POWER) || + (adj > 0 && rps->power.mode == LOW_POWER)) + rps->last_adj = 0; + + /* sysfs frequency interfaces may have snuck in while servicing the + * interrupt + */ + new_freq += adj; + new_freq = clamp_t(int, new_freq, min, max); + + if (intel_rps_set(rps, new_freq)) { + DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); + rps->last_adj = 0; + } + + mutex_unlock(&rps->lock); + +out: + spin_lock_irq(&gt->irq_lock); + gen6_gt_pm_unmask_irq(gt, rps->pm_events); + spin_unlock_irq(&gt->irq_lock); +} + +void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) +{ + struct intel_gt *gt = rps_to_gt(rps); + const u32 events = rps->pm_events & pm_iir; + + lockdep_assert_held(&gt->irq_lock); + + if (unlikely(!events)) + return; + + gen6_gt_pm_mask_irq(gt, events); + + rps->pm_iir |= events; + schedule_work(&rps->work); +} + +void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir) +{ + struct intel_gt *gt = rps_to_gt(rps); + + if (pm_iir & rps->pm_events) { + spin_lock(&gt->irq_lock); + gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events); + rps->pm_iir |= pm_iir & rps->pm_events; + schedule_work(&rps->work); + spin_unlock(&gt->irq_lock); + } + + if (INTEL_GEN(gt->i915) >= 8) + return; + + if (pm_iir & PM_VEBOX_USER_INTERRUPT) + intel_engine_breadcrumbs_irq(gt->engine[VECS0]); + + if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) + DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); +} + +void gen5_rps_irq_handler(struct intel_rps *rps) +{ + struct intel_uncore *uncore = rps_to_uncore(rps); + u32 busy_up, busy_down, max_avg, min_avg; + u8 new_freq; + + spin_lock(&mchdev_lock); + + intel_uncore_write16(uncore, + MEMINTRSTS, + intel_uncore_read(uncore, MEMINTRSTS)); + + intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); + busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG); + busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG); + max_avg = intel_uncore_read(uncore, RCBMAXAVG); + min_avg = intel_uncore_read(uncore, RCBMINAVG); + + /* Handle RCS change request from hw */ + new_freq = rps->cur_freq; + if (busy_up > max_avg) + new_freq++; + else if (busy_down < min_avg) + new_freq--; + new_freq = clamp(new_freq, + rps->min_freq_softlimit, + rps->max_freq_softlimit); + + if (new_freq != rps->cur_freq && gen5_rps_set(rps, new_freq)) + rps->cur_freq = new_freq; + + spin_unlock(&mchdev_lock); +} + +void intel_rps_init_early(struct intel_rps *rps) +{ + mutex_init(&rps->lock); + mutex_init(&rps->power.mutex); + + INIT_WORK(&rps->work, rps_work); + + atomic_set(&rps->num_waiters, 0); +} + +void intel_rps_init(struct intel_rps *rps) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + + if (IS_CHERRYVIEW(i915)) + chv_rps_init(rps); + else if (IS_VALLEYVIEW(i915)) + vlv_rps_init(rps); + else if (INTEL_GEN(i915) >= 6) + gen6_rps_init(rps); + else if (IS_IRONLAKE_M(i915)) + gen5_rps_init(rps); + + /* Derive initial user preferences/limits from the hardware limits */ + rps->max_freq_softlimit = rps->max_freq; + rps->min_freq_softlimit = rps->min_freq; + + /* After setting max-softlimit, find the overclock max freq */ + if (IS_GEN(i915, 6) || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) { + u32 params = 0; + + sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS, + &params, NULL); + if (params & BIT(31)) { /* OC supported */ + DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n", + (rps->max_freq & 0xff) * 50, + (params & 0xff) * 50); + rps->max_freq = params & 0xff; + } + } + + /* Finally allow us to boost to max by default */ + rps->boost_freq =
rps->max_freq; + rps->idle_freq = rps->min_freq; + rps->cur_freq = rps->idle_freq; + + rps->pm_intrmsk_mbz = 0; + + /* + * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer + * if GEN6_PM_UP_EI_EXPIRED is masked. + * + * TODO: verify if this can be reproduced on VLV,CHV. + */ + if (INTEL_GEN(i915) <= 7) + rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; + + if (INTEL_GEN(i915) >= 8) + rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; +} + +u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat) +{ + struct drm_i915_private *i915 = rps_to_i915(rps); + u32 cagf; + + if (INTEL_GEN(i915) >= 9) + cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; + else if (IS_HASWELL(i915) || IS_BROADWELL(i915)) + cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; + else + cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; + + return cagf; +} + +/* External interface for intel_ips.ko */ + +static struct drm_i915_private __rcu *ips_mchdev; + +/** + * Tells the intel_ips driver that the i915 driver is now loaded, if + * IPS got loaded first. + * + * This awkward dance is so that neither module has to depend on the + * other in order for IPS to do the appropriate communication of + * GPU turbo limits to i915. + */ +static void +ips_ping_for_i915_load(void) +{ + void (*link)(void); + + link = symbol_get(ips_link_to_i915_driver); + if (link) { + link(); + symbol_put(ips_link_to_i915_driver); + } +} + +void intel_rps_driver_register(struct intel_rps *rps) +{ + struct intel_gt *gt = rps_to_gt(rps); + + /* + * We only register the i915 ips part with intel-ips once everything is + * set up, to avoid intel-ips sneaking in and reading bogus values. + */ + if (IS_GEN(gt->i915, 5)) { + rcu_assign_pointer(ips_mchdev, gt->i915); + ips_ping_for_i915_load(); + } +} + +void intel_rps_driver_unregister(struct intel_rps *rps) +{ + rcu_assign_pointer(ips_mchdev, NULL); +} + +static struct drm_i915_private *mchdev_get(void) +{ + struct drm_i915_private *i915; + + rcu_read_lock(); + i915 = rcu_dereference(ips_mchdev); + if (!kref_get_unless_zero(&i915->drm.ref)) + i915 = NULL; + rcu_read_unlock(); + + return i915; +} + +/** + * i915_read_mch_val - return value for IPS use + * + * Calculate and return a value for the IPS driver to use when deciding whether + * we have thermal and power headroom to increase CPU or GPU power budget. + */ +unsigned long i915_read_mch_val(void) +{ + struct drm_i915_private *i915; + unsigned long chipset_val = 0; + unsigned long graphics_val = 0; + intel_wakeref_t wakeref; + + i915 = mchdev_get(); + if (!i915) + return 0; + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + struct intel_ips *ips = &i915->gt.rps.ips; + + spin_lock_irq(&mchdev_lock); + chipset_val = __ips_chipset_val(ips); + graphics_val = __ips_gfx_val(ips); + spin_unlock_irq(&mchdev_lock); + } + + drm_dev_put(&i915->drm); + return chipset_val + graphics_val; +} +EXPORT_SYMBOL_GPL(i915_read_mch_val); + +/** + * i915_gpu_raise - raise GPU frequency limit + * + * Raise the limit; IPS indicates we have thermal headroom. 
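On SNB/IVB/HSW, intel_rps_init() above asks the pcode for GEN6_READ_OC_PARAMS and, when bit 31 is set, replaces max_freq with the overclocked ceiling carried in the low byte (in 50 MHz units, as the debug message shows). A standalone decode of such a word, assuming only that layout; the function name and the sample value are hypothetical:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct oc_params {
        bool supported;
        unsigned int max_mhz;
};

/* Layout as used by intel_rps_init() above:
 * bit 31 = overclocking supported, bits 7:0 = max frequency in 50 MHz units. */
static struct oc_params decode_oc_params(uint32_t params)
{
        struct oc_params oc = {
                .supported = params & (1u << 31),
                .max_mhz = (params & 0xff) * 50,
        };
        return oc;
}

int main(void)
{
        struct oc_params oc = decode_oc_params(0x80000020); /* hypothetical word */

        printf("overclocking %s, max %u MHz\n",
               oc.supported ? "supported" : "not supported", oc.max_mhz);
        return 0;
}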
+ */ +bool i915_gpu_raise(void) +{ + struct drm_i915_private *i915; + struct intel_rps *rps; + + i915 = mchdev_get(); + if (!i915) + return false; + + rps = &i915->gt.rps; + + spin_lock_irq(&mchdev_lock); + if (rps->max_freq_softlimit < rps->max_freq) + rps->max_freq_softlimit++; + spin_unlock_irq(&mchdev_lock); + + drm_dev_put(&i915->drm); + return true; +} +EXPORT_SYMBOL_GPL(i915_gpu_raise); + +/** + * i915_gpu_lower - lower GPU frequency limit + * + * IPS indicates we're close to a thermal limit, so throttle back the GPU + * frequency maximum. + */ +bool i915_gpu_lower(void) +{ + struct drm_i915_private *i915; + struct intel_rps *rps; + + i915 = mchdev_get(); + if (!i915) + return false; + + rps = &i915->gt.rps; + + spin_lock_irq(&mchdev_lock); + if (rps->max_freq_softlimit > rps->min_freq) + rps->max_freq_softlimit--; + spin_unlock_irq(&mchdev_lock); + + drm_dev_put(&i915->drm); + return true; +} +EXPORT_SYMBOL_GPL(i915_gpu_lower); + +/** + * i915_gpu_busy - indicate GPU business to IPS + * + * Tell the IPS driver whether or not the GPU is busy. + */ +bool i915_gpu_busy(void) +{ + struct drm_i915_private *i915; + bool ret; + + i915 = mchdev_get(); + if (!i915) + return false; + + ret = i915->gt.awake; + + drm_dev_put(&i915->drm); + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_busy); + +/** + * i915_gpu_turbo_disable - disable graphics turbo + * + * Disable graphics turbo by resetting the max frequency and setting the + * current frequency to the default. + */ +bool i915_gpu_turbo_disable(void) +{ + struct drm_i915_private *i915; + struct intel_rps *rps; + bool ret; + + i915 = mchdev_get(); + if (!i915) + return false; + + rps = &i915->gt.rps; + + spin_lock_irq(&mchdev_lock); + rps->max_freq_softlimit = rps->min_freq; + ret = gen5_rps_set(&i915->gt.rps, rps->min_freq); + spin_unlock_irq(&mchdev_lock); + + drm_dev_put(&i915->drm); + return ret; +} +EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.h b/drivers/gpu/drm/i915/gt/intel_rps.h new file mode 100644 index 000000000000..9518c66c9792 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rps.h @@ -0,0 +1,38 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RPS_H +#define INTEL_RPS_H + +#include "intel_rps_types.h" + +struct i915_request; + +void intel_rps_init_early(struct intel_rps *rps); +void intel_rps_init(struct intel_rps *rps); + +void intel_rps_driver_register(struct intel_rps *rps); +void intel_rps_driver_unregister(struct intel_rps *rps); + +void intel_rps_enable(struct intel_rps *rps); +void intel_rps_disable(struct intel_rps *rps); + +void intel_rps_park(struct intel_rps *rps); +void intel_rps_unpark(struct intel_rps *rps); +void intel_rps_boost(struct i915_request *rq); + +int intel_rps_set(struct intel_rps *rps, u8 val); +void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive); + +int intel_gpu_freq(struct intel_rps *rps, int val); +int intel_freq_opcode(struct intel_rps *rps, int val); +u32 intel_get_cagf(struct intel_rps *rps, u32 rpstat1); + +void gen5_rps_irq_handler(struct intel_rps *rps); +void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir); +void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir); + +#endif /* INTEL_RPS_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_rps_types.h b/drivers/gpu/drm/i915/gt/intel_rps_types.h new file mode 100644 index 000000000000..c2e279154bd5 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_rps_types.h @@ -0,0 +1,93 @@ +/* + * SPDX-License-Identifier: MIT + * + 
* Copyright © 2019 Intel Corporation + */ + +#ifndef INTEL_RPS_TYPES_H +#define INTEL_RPS_TYPES_H + +#include <linux/atomic.h> +#include <linux/ktime.h> +#include <linux/mutex.h> +#include <linux/types.h> +#include <linux/workqueue.h> + +struct intel_ips { + u64 last_count1; + unsigned long last_time1; + unsigned long chipset_power; + u64 last_count2; + u64 last_time2; + unsigned long gfx_power; + u8 corr; + + int c, m; +}; + +struct intel_rps_ei { + ktime_t ktime; + u32 render_c0; + u32 media_c0; +}; + +struct intel_rps { + struct mutex lock; /* protects enabling and the worker */ + + /* + * work, interrupts_enabled and pm_iir are protected by + * dev_priv->irq_lock + */ + struct work_struct work; + bool enabled; + bool active; + u32 pm_iir; + + /* PM interrupt bits that should never be masked */ + u32 pm_intrmsk_mbz; + u32 pm_events; + + /* Frequencies are stored in potentially platform dependent multiples. + * In other words, *_freq needs to be multiplied by X to be interesting. + * Soft limits are those which are used for the dynamic reclocking done + * by the driver (raise frequencies under heavy loads, and lower for + * lighter loads). Hard limits are those imposed by the hardware. + * + * A distinction is made for overclocking, which is never enabled by + * default, and is considered to be above the hard limit if it's + * possible at all. + */ + u8 cur_freq; /* Current frequency (cached, may not == HW) */ + u8 last_freq; /* Last SWREQ frequency */ + u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ + u8 max_freq_softlimit; /* Max frequency permitted by the driver */ + u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ + u8 min_freq; /* AKA RPn. Minimum frequency */ + u8 boost_freq; /* Frequency to request when wait boosting */ + u8 idle_freq; /* Frequency to request when we are idle */ + u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ + u8 rp1_freq; /* "less than" RP0 power/freqency */ + u8 rp0_freq; /* Non-overclocked max frequency. 
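The struct comment above notes that these frequency fields are stored in platform dependent multiples. Concretely, intel_gpu_freq() earlier in this patch multiplies by 50 MHz per unit on gen6-8 and by 50/3 MHz (the 16.66 MHz units mentioned in gen6_rps_init()) on gen9+, while Valleyview/Cherryview derive MHz from the GPLL reference clock instead. A standalone sketch of the two simple cases, assuming the usual i915 values GT_FREQUENCY_MULTIPLIER = 50 and GEN9_FREQ_SCALER = 3:

#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50      /* pre-gen9: 50 MHz per unit */
#define GEN9_FREQ_SCALER        3       /* gen9+: 50/3 MHz per unit */

static int div_round_closest(int a, int b)
{
        return (a + b / 2) / b;
}

/* Register value -> MHz, mirroring the non-VLV/CHV paths of intel_gpu_freq()
 * shown earlier in the patch. */
static int gpu_freq_mhz(int val, int gen)
{
        if (gen >= 9)
                return div_round_closest(val * GT_FREQUENCY_MULTIPLIER,
                                         GEN9_FREQ_SCALER);
        return val * GT_FREQUENCY_MULTIPLIER;
}

int main(void)
{
        printf("gen8 value 18 -> %d MHz\n", gpu_freq_mhz(18, 8));  /* 900 */
        printf("gen9 value 66 -> %d MHz\n", gpu_freq_mhz(66, 9));  /* 1100 */
        return 0;
}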
*/ + u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ + + int last_adj; + + struct { + struct mutex mutex; + + enum { LOW_POWER, BETWEEN, HIGH_POWER } mode; + unsigned int interactive; + + u8 up_threshold; /* Current %busy required to uplock */ + u8 down_threshold; /* Current %busy required to downclock */ + } power; + + atomic_t num_waiters; + atomic_t boosts; + + /* manual wa residency calculations */ + struct intel_rps_ei ei; + struct intel_ips ips; +}; + +#endif /* INTEL_RPS_TYPES_H */ diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.c b/drivers/gpu/drm/i915/gt/intel_sseu.c index 6bf2d87da109..74f793423231 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.c +++ b/drivers/gpu/drm/i915/gt/intel_sseu.c @@ -8,6 +8,19 @@ #include "intel_lrc_reg.h" #include "intel_sseu.h" +void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, + u8 max_subslices, u8 max_eus_per_subslice) +{ + sseu->max_slices = max_slices; + sseu->max_subslices = max_subslices; + sseu->max_eus_per_subslice = max_eus_per_subslice; + + sseu->ss_stride = GEN_SSEU_STRIDE(sseu->max_subslices); + GEM_BUG_ON(sseu->ss_stride > GEN_MAX_SUBSLICE_STRIDE); + sseu->eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); + GEM_BUG_ON(sseu->eu_stride > GEN_MAX_EU_STRIDE); +} + unsigned int intel_sseu_subslice_total(const struct sseu_dev_info *sseu) { @@ -19,10 +32,32 @@ intel_sseu_subslice_total(const struct sseu_dev_info *sseu) return total; } +u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice) +{ + int i, offset = slice * sseu->ss_stride; + u32 mask = 0; + + GEM_BUG_ON(slice >= sseu->max_slices); + + for (i = 0; i < sseu->ss_stride; i++) + mask |= (u32)sseu->subslice_mask[offset + i] << + i * BITS_PER_BYTE; + + return mask; +} + +void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, + u32 ss_mask) +{ + int offset = slice * sseu->ss_stride; + + memcpy(&sseu->subslice_mask[offset], &ss_mask, sseu->ss_stride); +} + unsigned int intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice) { - return hweight8(sseu->subslice_mask[slice]); + return hweight32(intel_sseu_get_subslices(sseu, slice)); } u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index b50d0401a4e2..d1d225204f09 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -10,15 +10,21 @@ #include <linux/types.h> #include <linux/kernel.h> +#include "i915_gem.h" + struct drm_i915_private; #define GEN_MAX_SLICES (6) /* CNL upper bound */ #define GEN_MAX_SUBSLICES (8) /* ICL upper bound */ #define GEN_SSEU_STRIDE(max_entries) DIV_ROUND_UP(max_entries, BITS_PER_BYTE) +#define GEN_MAX_SUBSLICE_STRIDE GEN_SSEU_STRIDE(GEN_MAX_SUBSLICES) +#define GEN_MAX_EUS (16) /* TGL upper bound */ +#define GEN_MAX_EU_STRIDE GEN_SSEU_STRIDE(GEN_MAX_EUS) struct sseu_dev_info { u8 slice_mask; - u8 subslice_mask[GEN_MAX_SLICES]; + u8 subslice_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICE_STRIDE]; + u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES * GEN_MAX_EU_STRIDE]; u16 eu_total; u8 eu_per_subslice; u8 min_eu_in_pool; @@ -33,11 +39,8 @@ struct sseu_dev_info { u8 max_subslices; u8 max_eus_per_subslice; - /* We don't have more than 8 eus per subslice at the moment and as we - * store eus enabled using bits, no need to multiply by eus per - * subslice. 
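The sseu changes above replace the per-slice u8 subslice mask with a flat byte array addressed by a per-slice stride (ss_stride), and intel_sseu_get_subslices()/intel_sseu_set_subslices() convert between that storage and a u32 mask. A standalone sketch of the same packing and unpacking; the 2-byte stride and the helper names are hypothetical, and the memcpy is little-endian exactly as in the driver code above:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BITS_PER_BYTE 8

/* Mirrors intel_sseu_get_subslices(): rebuild a u32 mask from the slice's
 * run of ss_stride bytes. */
static uint32_t get_subslices(const uint8_t *subslice_mask, int ss_stride,
                              int slice)
{
        const uint8_t *p = subslice_mask + slice * ss_stride;
        uint32_t mask = 0;

        for (int i = 0; i < ss_stride; i++)
                mask |= (uint32_t)p[i] << (i * BITS_PER_BYTE);
        return mask;
}

/* Mirrors intel_sseu_set_subslices(): store the low ss_stride bytes. */
static void set_subslices(uint8_t *subslice_mask, int ss_stride, int slice,
                          uint32_t ss_mask)
{
        memcpy(subslice_mask + slice * ss_stride, &ss_mask, ss_stride);
}

int main(void)
{
        uint8_t mask[4] = { 0 };        /* 2 slices x 2-byte stride (example) */

        set_subslices(mask, 2, 1, 0x03ff);      /* 10 subslices on slice 1 */
        printf("slice 1 mask: 0x%x\n", get_subslices(mask, 2, 1));
        return 0;
}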
- */ - u8 eu_mask[GEN_MAX_SLICES * GEN_MAX_SUBSLICES]; + u8 ss_stride; + u8 eu_stride; }; /* @@ -63,12 +66,34 @@ intel_sseu_from_device_info(const struct sseu_dev_info *sseu) return value; } +static inline bool +intel_sseu_has_subslice(const struct sseu_dev_info *sseu, int slice, + int subslice) +{ + u8 mask; + int ss_idx = subslice / BITS_PER_BYTE; + + GEM_BUG_ON(ss_idx >= sseu->ss_stride); + + mask = sseu->subslice_mask[slice * sseu->ss_stride + ss_idx]; + + return mask & BIT(subslice % BITS_PER_BYTE); +} + +void intel_sseu_set_info(struct sseu_dev_info *sseu, u8 max_slices, + u8 max_subslices, u8 max_eus_per_subslice); + unsigned int intel_sseu_subslice_total(const struct sseu_dev_info *sseu); unsigned int intel_sseu_subslices_per_slice(const struct sseu_dev_info *sseu, u8 slice); +u32 intel_sseu_get_subslices(const struct sseu_dev_info *sseu, u8 slice); + +void intel_sseu_set_subslices(struct sseu_dev_info *sseu, int slice, + u32 ss_mask); + u32 intel_sseu_make_rpcs(struct drm_i915_private *i915, const struct intel_sseu *req_sseu); diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 9cb01d9828f1..14ad10acd548 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -4,13 +4,13 @@ * Copyright © 2016-2018 Intel Corporation */ -#include "gt/intel_gt_types.h" - #include "i915_drv.h" #include "i915_active.h" #include "i915_syncmap.h" -#include "gt/intel_timeline.h" +#include "intel_gt.h" +#include "intel_ring.h" +#include "intel_timeline.h" #define ptr_set_bit(ptr, bit) ((typeof(ptr))((unsigned long)(ptr) | BIT(bit))) #define ptr_test_bit(ptr, bit) ((unsigned long)(ptr) & BIT(bit)) @@ -136,6 +136,7 @@ static void __idle_cacheline_free(struct intel_timeline_cacheline *cl) kfree(cl); } +__i915_active_call static void __cacheline_retire(struct i915_active *active) { struct intel_timeline_cacheline *cl = @@ -177,8 +178,7 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline) cl->hwsp = hwsp; cl->vaddr = page_pack_bits(vaddr, cacheline); - i915_active_init(hwsp->gt->i915, &cl->active, - __cacheline_active, __cacheline_retire); + i915_active_init(&cl->active, __cacheline_active, __cacheline_retire); return cl; } @@ -254,7 +254,7 @@ int intel_timeline_init(struct intel_timeline *timeline, mutex_init(&timeline->mutex); - INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex); + INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex); INIT_LIST_HEAD(&timeline->requests); i915_syncmap_init(&timeline->sync); @@ -442,7 +442,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl, * free it after the current request is retired, which ensures that * all writes into the cacheline from previous requests are complete. 
*/ - err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq); + err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence); if (err) goto err_cacheline; @@ -493,24 +493,39 @@ int intel_timeline_get_seqno(struct intel_timeline *tl, static int cacheline_ref(struct intel_timeline_cacheline *cl, struct i915_request *rq) { - return i915_active_ref(&cl->active, rq->timeline, rq); + return i915_active_add_request(&cl->active, rq); } int intel_timeline_read_hwsp(struct i915_request *from, struct i915_request *to, u32 *hwsp) { - struct intel_timeline_cacheline *cl = from->hwsp_cacheline; - struct intel_timeline *tl = from->timeline; + struct intel_timeline *tl; int err; - GEM_BUG_ON(to->timeline == tl); + rcu_read_lock(); + tl = rcu_dereference(from->timeline); + if (i915_request_completed(from) || !kref_get_unless_zero(&tl->kref)) + tl = NULL; + rcu_read_unlock(); + if (!tl) /* already completed */ + return 1; + + GEM_BUG_ON(rcu_access_pointer(to->timeline) == tl); + + err = -EBUSY; + if (mutex_trylock(&tl->mutex)) { + struct intel_timeline_cacheline *cl = from->hwsp_cacheline; + + if (i915_request_completed(from)) { + err = 1; + goto unlock; + } - mutex_lock_nested(&tl->mutex, SINGLE_DEPTH_NESTING); - err = i915_request_completed(from); - if (!err) err = cacheline_ref(cl, to); - if (!err) { + if (err) + goto unlock; + if (likely(cl == tl->hwsp_cacheline)) { *hwsp = tl->hwsp_offset; } else { /* across a seqno wrap, recover the original offset */ @@ -518,8 +533,11 @@ int intel_timeline_read_hwsp(struct i915_request *from, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS) * CACHELINE_BYTES; } + +unlock: + mutex_unlock(&tl->mutex); } - mutex_unlock(&tl->mutex); + intel_timeline_put(tl); return err; } @@ -541,7 +559,7 @@ void __intel_timeline_free(struct kref *kref) container_of(kref, typeof(*timeline), kref); intel_timeline_fini(timeline); - kfree(timeline); + kfree_rcu(timeline, rcu); } static void timelines_fini(struct intel_gt *gt) diff --git a/drivers/gpu/drm/i915/gt/intel_timeline_types.h b/drivers/gpu/drm/i915/gt/intel_timeline_types.h index 2b1baf2fcc8e..98d9ee166379 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline_types.h +++ b/drivers/gpu/drm/i915/gt/intel_timeline_types.h @@ -58,12 +58,13 @@ struct intel_timeline { */ struct list_head requests; - /* Contains an RCU guarded pointer to the last request. No reference is + /* + * Contains an RCU guarded pointer to the last request. No reference is * held to the request, users must carefully acquire a reference to - * the request using i915_active_request_get_request_rcu(), or hold the - * struct_mutex. + * the request using i915_active_fence_get(), or manage the RCU + * protection themselves (cf the i915_active_fence API). 
*/ - struct i915_active_request last_request; + struct i915_active_fence last_request; /** * We track the most recent seqno that we wait on in every context so @@ -80,6 +81,7 @@ struct intel_timeline { struct intel_gt *gt; struct kref kref; + struct rcu_head rcu; }; #endif /* __I915_TIMELINE_TYPES_H__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 5f6ec2fd29a0..e4bccc14602f 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -7,6 +7,7 @@ #include "i915_drv.h" #include "intel_context.h" #include "intel_gt.h" +#include "intel_ring.h" #include "intel_workarounds.h" /** @@ -567,6 +568,9 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine, static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { + /* Wa_1409142259:tgl */ + WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3, + GEN12_DISABLE_CPS_AWARE_COLOR_PIPE); } static void @@ -796,11 +800,10 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal) } slice = fls(sseu->slice_mask) - 1; - GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask)); - subslice = fls(l3_en & sseu->subslice_mask[slice]); + subslice = fls(l3_en & intel_sseu_get_subslices(sseu, slice)); if (!subslice) { DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n", - sseu->subslice_mask[slice], l3_en); + intel_sseu_get_subslices(sseu, slice), l3_en); subslice = fls(l3_en); WARN_ON(!subslice); } @@ -890,11 +893,27 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) wa_write_or(wal, GAMT_CHKN_BIT_REG, GAMT_CHKN_DISABLE_L3_COH_PIPE); + + /* Wa_1607087056:icl */ + wa_write_or(wal, + SLICE_UNIT_LEVEL_CLKGATE, + L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); } static void tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal) { + /* Wa_1409420604:tgl */ + if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) + wa_write_or(wal, + SUBSLICE_UNIT_LEVEL_CLKGATE2, + CPSSUNIT_CLKGATE_DIS); + + /* Wa_1409180338:tgl */ + if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) + wa_write_or(wal, + SLICE_UNIT_LEVEL_CLKGATE, + L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS); } static void @@ -1197,6 +1216,26 @@ static void icl_whitelist_build(struct intel_engine_cs *engine) static void tgl_whitelist_build(struct intel_engine_cs *engine) { + struct i915_wa_list *w = &engine->whitelist; + + switch (engine->class) { + case RENDER_CLASS: + /* + * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl + * + * This covers 4 registers which are next to one another : + * - PS_INVOCATION_COUNT + * - PS_INVOCATION_COUNT_UDW + * - PS_DEPTH_COUNT + * - PS_DEPTH_COUNT_UDW + */ + whitelist_reg_ext(w, PS_INVOCATION_COUNT, + RING_FORCE_TO_NONPRIV_ACCESS_RD | + RING_FORCE_TO_NONPRIV_RANGE_4); + break; + default: + break; + } } void intel_engine_init_whitelist(struct intel_engine_cs *engine) @@ -1258,6 +1297,26 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) { struct drm_i915_private *i915 = engine->i915; + if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) { + /* Wa_1606700617:tgl */ + wa_masked_en(wal, + GEN9_CS_DEBUG_MODE1, + FF_DOP_CLOCK_GATE_DISABLE); + + /* Wa_1607138336:tgl */ + wa_write_or(wal, + GEN9_CTX_PREEMPT_REG, + GEN12_DISABLE_POSH_BUSY_FF_DOP_CG); + + /* Wa_1607030317:tgl */ + /* Wa_1607186500:tgl */ + /* Wa_1607297627:tgl */ + wa_masked_en(wal, + GEN6_RC_SLEEP_PSMI_CONTROL, + GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE | + 
GEN8_RC_SEMA_IDLE_MSG_DISABLE); + } + if (IS_GEN(i915, 11)) { /* This is not an Wa. Enable for better image quality */ wa_masked_en(wal, @@ -1452,7 +1511,7 @@ static bool mcr_range(struct drm_i915_private *i915, u32 offset) * which only controls CPU initiated MMIO. Routing does not * work for CS access so we cannot verify them on this path. */ - if (INTEL_GEN(i915) >= 8 && (offset >= 0xb100 && offset <= 0xb3ff)) + if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff)) return true; return false; diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c index 5d43cbc3f345..83f549d203a0 100644 --- a/drivers/gpu/drm/i915/gt/mock_engine.c +++ b/drivers/gpu/drm/i915/gt/mock_engine.c @@ -23,6 +23,7 @@ */ #include "gem/i915_gem_context.h" +#include "gt/intel_ring.h" #include "i915_drv.h" #include "intel_context.h" @@ -240,6 +241,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, struct mock_engine *engine; GEM_BUG_ON(id >= I915_NUM_ENGINES); + GEM_BUG_ON(!i915->gt.uncore); engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL); if (!engine) @@ -248,9 +250,11 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, /* minimal engine setup for requests */ engine->base.i915 = i915; engine->base.gt = &i915->gt; + engine->base.uncore = i915->gt.uncore; snprintf(engine->base.name, sizeof(engine->base.name), "%s", name); engine->base.id = id; engine->base.mask = BIT(id); + engine->base.legacy_idx = INVALID_ENGINE; engine->base.instance = id; engine->base.status_page.addr = (void *)(engine + 1); @@ -265,6 +269,9 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915, engine->base.reset.finish = mock_reset_finish; engine->base.cancel_requests = mock_cancel_requests; + i915->gt.engine[id] = &engine->base; + i915->gt.engine_class[0][id] = &engine->base; + /* fake hw queue */ spin_lock_init(&engine->hw_lock); timer_setup(&engine->hw_delay, hw_delay_complete, 0); diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c index 9d1ea26c7a2d..bc720defc6b8 100644 --- a/drivers/gpu/drm/i915/gt/selftest_context.c +++ b/drivers/gpu/drm/i915/gt/selftest_context.c @@ -14,22 +14,28 @@ static int request_sync(struct i915_request *rq) { + struct intel_timeline *tl = i915_request_timeline(rq); long timeout; int err = 0; + intel_timeline_get(tl); i915_request_get(rq); - i915_request_add(rq); + /* Opencode i915_request_add() so we can keep the timeline locked. 
*/ + __i915_request_commit(rq); + __i915_request_queue(rq, NULL); + timeout = i915_request_wait(rq, 0, HZ / 10); - if (timeout < 0) { + if (timeout < 0) err = timeout; - } else { - mutex_lock(&rq->timeline->mutex); + else i915_request_retire_upto(rq); - mutex_unlock(&rq->timeline->mutex); - } + + lockdep_unpin_lock(&tl->mutex, rq->cookie); + mutex_unlock(&tl->mutex); i915_request_put(rq); + intel_timeline_put(tl); return err; } @@ -41,24 +47,20 @@ static int context_sync(struct intel_context *ce) mutex_lock(&tl->mutex); do { - struct i915_request *rq; + struct dma_fence *fence; long timeout; - rcu_read_lock(); - rq = rcu_dereference(tl->last_request.request); - if (rq) - rq = i915_request_get_rcu(rq); - rcu_read_unlock(); - if (!rq) + fence = i915_active_fence_get(&tl->last_request); + if (!fence) break; - timeout = i915_request_wait(rq, 0, HZ / 10); + timeout = dma_fence_wait_timeout(fence, false, HZ / 10); if (timeout < 0) err = timeout; else - i915_request_retire_upto(rq); + i915_request_retire_upto(to_request(fence)); - i915_request_put(rq); + dma_fence_put(fence); } while (!err); mutex_unlock(&tl->mutex); @@ -101,9 +103,6 @@ static int __live_context_size(struct intel_engine_cs *engine, * * TLDR; this overlaps with the execlists redzone. */ - if (HAS_EXECLISTS(engine->i915)) - vaddr += LRC_HEADER_PAGES * PAGE_SIZE; - vaddr += engine->context_size - I915_GTT_PAGE_SIZE; memset(vaddr, POISON_INUSE, I915_GTT_PAGE_SIZE); @@ -153,15 +152,11 @@ static int live_context_size(void *arg) * HW tries to write past the end of one. */ - mutex_lock(>->i915->drm.struct_mutex); - fixme = kernel_context(gt->i915); - if (IS_ERR(fixme)) { - err = PTR_ERR(fixme); - goto unlock; - } + if (IS_ERR(fixme)) + return PTR_ERR(fixme); - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { struct { struct drm_i915_gem_object *state; void *pinned; @@ -199,8 +194,6 @@ static int live_context_size(void *arg) } kernel_context_close(fixme); -unlock: - mutex_unlock(>->i915->drm.struct_mutex); return err; } @@ -303,26 +296,23 @@ static int live_active_context(void *arg) if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(>->i915->drm.struct_mutex); - fixme = live_context(gt->i915, file); if (IS_ERR(fixme)) { err = PTR_ERR(fixme); - goto unlock; + goto out_file; } - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { err = __live_active_context(engine, fixme); if (err) break; - err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); + err = igt_flush_test(gt->i915); if (err) break; } -unlock: - mutex_unlock(>->i915->drm.struct_mutex); +out_file: mock_file_free(gt->i915, file); return err; } @@ -416,26 +406,23 @@ static int live_remote_context(void *arg) if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(>->i915->drm.struct_mutex); - fixme = live_context(gt->i915, file); if (IS_ERR(fixme)) { err = PTR_ERR(fixme); - goto unlock; + goto out_file; } - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { err = __live_remote_context(engine, fixme); if (err) break; - err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); + err = igt_flush_test(gt->i915); if (err) break; } -unlock: - mutex_unlock(>->i915->drm.struct_mutex); +out_file: mock_file_free(gt->i915, file); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c new file mode 100644 index 000000000000..e864406bd2d9 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c @@ -0,0 +1,350 @@ +/* + * SPDX-License-Identifier: 
MIT + * + * Copyright © 2018 Intel Corporation + */ + +#include <linux/sort.h> + +#include "i915_drv.h" + +#include "intel_gt_requests.h" +#include "i915_selftest.h" + +struct pulse { + struct i915_active active; + struct kref kref; +}; + +static int pulse_active(struct i915_active *active) +{ + kref_get(&container_of(active, struct pulse, active)->kref); + return 0; +} + +static void pulse_free(struct kref *kref) +{ + kfree(container_of(kref, struct pulse, kref)); +} + +static void pulse_put(struct pulse *p) +{ + kref_put(&p->kref, pulse_free); +} + +static void pulse_retire(struct i915_active *active) +{ + pulse_put(container_of(active, struct pulse, active)); +} + +static struct pulse *pulse_create(void) +{ + struct pulse *p; + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return p; + + kref_init(&p->kref); + i915_active_init(&p->active, pulse_active, pulse_retire); + + return p; +} + +static void pulse_unlock_wait(struct pulse *p) +{ + mutex_lock(&p->active.mutex); + mutex_unlock(&p->active.mutex); + flush_work(&p->active.work); +} + +static int __live_idle_pulse(struct intel_engine_cs *engine, + int (*fn)(struct intel_engine_cs *cs)) +{ + struct pulse *p; + int err; + + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); + + p = pulse_create(); + if (!p) + return -ENOMEM; + + err = i915_active_acquire(&p->active); + if (err) + goto out; + + err = i915_active_acquire_preallocate_barrier(&p->active, engine); + if (err) { + i915_active_release(&p->active); + goto out; + } + + i915_active_acquire_barrier(&p->active); + i915_active_release(&p->active); + + GEM_BUG_ON(i915_active_is_idle(&p->active)); + GEM_BUG_ON(llist_empty(&engine->barrier_tasks)); + + err = fn(engine); + if (err) + goto out; + + GEM_BUG_ON(!llist_empty(&engine->barrier_tasks)); + + if (intel_gt_retire_requests_timeout(engine->gt, HZ / 5)) { + err = -ETIME; + goto out; + } + + GEM_BUG_ON(READ_ONCE(engine->serial) != engine->wakeref_serial); + + pulse_unlock_wait(p); /* synchronize with the retirement callback */ + + if (!i915_active_is_idle(&p->active)) { + struct drm_printer m = drm_err_printer("pulse"); + + pr_err("%s: heartbeat pulse did not flush idle tasks\n", + engine->name); + i915_active_print(&p->active, &m); + + err = -EINVAL; + goto out; + } + +out: + pulse_put(p); + return err; +} + +static int live_idle_flush(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that we can flush the idle barriers */ + + for_each_engine(engine, gt, id) { + intel_engine_pm_get(engine); + err = __live_idle_pulse(engine, intel_engine_flush_barriers); + intel_engine_pm_put(engine); + if (err) + break; + } + + return err; +} + +static int live_idle_pulse(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that heartbeat pulses flush the idle barriers */ + + for_each_engine(engine, gt, id) { + intel_engine_pm_get(engine); + err = __live_idle_pulse(engine, intel_engine_pulse); + intel_engine_pm_put(engine); + if (err && err != -ENODEV) + break; + + err = 0; + } + + return err; +} + +static int cmp_u32(const void *_a, const void *_b) +{ + const u32 *a = _a, *b = _b; + + return *a - *b; +} + +static int __live_heartbeat_fast(struct intel_engine_cs *engine) +{ + struct intel_context *ce; + struct i915_request *rq; + ktime_t t0, t1; + u32 times[5]; + int err; + int i; + + ce = intel_context_create(engine->kernel_context->gem_context, + engine); + if (IS_ERR(ce)) + return 
PTR_ERR(ce); + + intel_engine_pm_get(engine); + + err = intel_engine_set_heartbeat(engine, 1); + if (err) + goto err_pm; + + for (i = 0; i < ARRAY_SIZE(times); i++) { + /* Manufacture a tick */ + do { + while (READ_ONCE(engine->heartbeat.systole)) + flush_delayed_work(&engine->heartbeat.work); + + engine->serial++; /* quick, pretend we are not idle! */ + flush_delayed_work(&engine->heartbeat.work); + if (!delayed_work_pending(&engine->heartbeat.work)) { + pr_err("%s: heartbeat did not start\n", + engine->name); + err = -EINVAL; + goto err_pm; + } + + rcu_read_lock(); + rq = READ_ONCE(engine->heartbeat.systole); + if (rq) + rq = i915_request_get_rcu(rq); + rcu_read_unlock(); + } while (!rq); + + t0 = ktime_get(); + while (rq == READ_ONCE(engine->heartbeat.systole)) + yield(); /* work is on the local cpu! */ + t1 = ktime_get(); + + i915_request_put(rq); + times[i] = ktime_us_delta(t1, t0); + } + + sort(times, ARRAY_SIZE(times), sizeof(times[0]), cmp_u32, NULL); + + pr_info("%s: Heartbeat delay: %uus [%u, %u]\n", + engine->name, + times[ARRAY_SIZE(times) / 2], + times[0], + times[ARRAY_SIZE(times) - 1]); + + /* Min work delay is 2 * 2 (worst), +1 for scheduling, +1 for slack */ + if (times[ARRAY_SIZE(times) / 2] > jiffies_to_usecs(6)) { + pr_err("%s: Heartbeat delay was %uus, expected less than %dus\n", + engine->name, + times[ARRAY_SIZE(times) / 2], + jiffies_to_usecs(6)); + err = -EINVAL; + } + + intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL); +err_pm: + intel_engine_pm_put(engine); + intel_context_put(ce); + return err; +} + +static int live_heartbeat_fast(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that the heartbeat ticks at the desired rate. 
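+ * (__live_heartbeat_fast() above drops the interval to its minimum, forges a tick by bumping engine->serial, then times how long each systole request stays in flight; the median of five samples must come in under the jiffies_to_usecs(6) budget worked out there.)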
*/ + if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL) + return 0; + + for_each_engine(engine, gt, id) { + err = __live_heartbeat_fast(engine); + if (err) + break; + } + + return err; +} + +static int __live_heartbeat_off(struct intel_engine_cs *engine) +{ + int err; + + intel_engine_pm_get(engine); + + engine->serial++; + flush_delayed_work(&engine->heartbeat.work); + if (!delayed_work_pending(&engine->heartbeat.work)) { + pr_err("%s: heartbeat not running\n", + engine->name); + err = -EINVAL; + goto err_pm; + } + + err = intel_engine_set_heartbeat(engine, 0); + if (err) + goto err_pm; + + engine->serial++; + flush_delayed_work(&engine->heartbeat.work); + if (delayed_work_pending(&engine->heartbeat.work)) { + pr_err("%s: heartbeat still running\n", + engine->name); + err = -EINVAL; + goto err_beat; + } + + if (READ_ONCE(engine->heartbeat.systole)) { + pr_err("%s: heartbeat still allocated\n", + engine->name); + err = -EINVAL; + goto err_beat; + } + +err_beat: + intel_engine_set_heartbeat(engine, CONFIG_DRM_I915_HEARTBEAT_INTERVAL); +err_pm: + intel_engine_pm_put(engine); + return err; +} + +static int live_heartbeat_off(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* Check that we can turn off heartbeat and not interrupt VIP */ + if (!CONFIG_DRM_I915_HEARTBEAT_INTERVAL) + return 0; + + for_each_engine(engine, gt, id) { + if (!intel_engine_has_preemption(engine)) + continue; + + err = __live_heartbeat_off(engine); + if (err) + break; + } + + return err; +} + +int intel_heartbeat_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_idle_flush), + SUBTEST(live_idle_pulse), + SUBTEST(live_heartbeat_fast), + SUBTEST(live_heartbeat_off), + }; + int saved_hangcheck; + int err; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + saved_hangcheck = i915_modparams.enable_hangcheck; + i915_modparams.enable_hangcheck = INT_MAX; + + err = intel_gt_live_subtests(tests, &i915->gt); + + i915_modparams.enable_hangcheck = saved_hangcheck; + return err; +} diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index 3a1419376912..20b9c83f43ad 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -25,7 +25,7 @@ static int live_engine_pm(void *arg) } GEM_BUG_ON(intel_gt_pm_is_awake(gt)); - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { const typeof(*igt_atomic_phases) *p; for (p = igt_atomic_phases; p->name; p++) { diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c new file mode 100644 index 000000000000..d1752f15702a --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -0,0 +1,60 @@ + +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "selftest_llc.h" + +static int live_gt_resume(void *arg) +{ + struct intel_gt *gt = arg; + IGT_TIMEOUT(end_time); + int err; + + /* Do several suspend/resume cycles to check we don't explode! 
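+ * (Each pass suspends the GT via intel_gt_suspend_prepare()/intel_gt_suspend_late(), checks that rc6 really was disabled, resumes, checks that rc6 comes back where supported, and runs st_llc_verify() to confirm the LLC ring-frequency state survived; any failure marks the GT wedged-on-init and stops the loop.)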
*/ + do { + intel_gt_suspend_prepare(gt); + intel_gt_suspend_late(gt); + + if (gt->rc6.enabled) { + pr_err("rc6 still enabled after suspend!\n"); + intel_gt_set_wedged_on_init(gt); + err = -EINVAL; + break; + } + + err = intel_gt_resume(gt); + if (err) + break; + + if (gt->rc6.supported && !gt->rc6.enabled) { + pr_err("rc6 not enabled upon resume!\n"); + intel_gt_set_wedged_on_init(gt); + err = -EINVAL; + break; + } + + err = st_llc_verify(>->llc); + if (err) { + pr_err("llc state not restored upon resume!\n"); + intel_gt_set_wedged_on_init(gt); + break; + } + } while (!__igt_timeout(end_time, NULL)); + + return err; +} + +int intel_gt_pm_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_gt_resume), + }; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); +} diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index a0098fc35921..85e9ccf5c304 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -131,7 +131,7 @@ static struct i915_request * hang_create_request(struct hang *h, struct intel_engine_cs *engine) { struct intel_gt *gt = h->gt; - struct i915_address_space *vm = h->ctx->vm ?: &engine->gt->ggtt->vm; + struct i915_address_space *vm = i915_gem_context_get_vm_rcu(h->ctx); struct drm_i915_gem_object *obj; struct i915_request *rq = NULL; struct i915_vma *hws, *vma; @@ -141,12 +141,15 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) int err; obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); - if (IS_ERR(obj)) + if (IS_ERR(obj)) { + i915_vm_put(vm); return ERR_CAST(obj); + } vaddr = i915_gem_object_pin_map(obj, i915_coherent_map_type(gt->i915)); if (IS_ERR(vaddr)) { i915_gem_object_put(obj); + i915_vm_put(vm); return ERR_CAST(vaddr); } @@ -157,16 +160,22 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine) h->batch = vaddr; vma = i915_vma_instance(h->obj, vm, NULL); - if (IS_ERR(vma)) + if (IS_ERR(vma)) { + i915_vm_put(vm); return ERR_CAST(vma); + } hws = i915_vma_instance(h->hws, vm, NULL); - if (IS_ERR(hws)) + if (IS_ERR(hws)) { + i915_vm_put(vm); return ERR_CAST(hws); + } err = i915_vma_pin(vma, 0, 0, PIN_USER); - if (err) + if (err) { + i915_vm_put(vm); return ERR_PTR(err); + } err = i915_vma_pin(hws, 0, 0, PIN_USER); if (err) @@ -264,6 +273,7 @@ unpin_hws: i915_vma_unpin(hws); unpin_vma: i915_vma_unpin(vma); + i915_vm_put(vm); return err ? 
ERR_PTR(err) : rq; } @@ -285,7 +295,7 @@ static void hang_fini(struct hang *h) kernel_context_close(h->ctx); - igt_flush_test(h->gt->i915, I915_WAIT_LOCKED); + igt_flush_test(h->gt->i915); } static bool wait_until_running(struct hang *h, struct i915_request *rq) @@ -309,12 +319,11 @@ static int igt_hang_sanitycheck(void *arg) /* Basic check that we can execute our hanging batch */ - mutex_lock(>->i915->drm.struct_mutex); err = hang_init(&h, gt); if (err) - goto unlock; + return err; - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { struct intel_wedge_me w; long timeout; @@ -355,8 +364,6 @@ static int igt_hang_sanitycheck(void *arg) fini: hang_fini(&h); -unlock: - mutex_unlock(>->i915->drm.struct_mutex); return err; } @@ -383,9 +390,7 @@ static int igt_reset_nop(void *arg) if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(>->i915->drm.struct_mutex); ctx = live_context(gt->i915, file); - mutex_unlock(>->i915->drm.struct_mutex); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; @@ -395,9 +400,7 @@ static int igt_reset_nop(void *arg) reset_count = i915_reset_count(global); count = 0; do { - mutex_lock(>->i915->drm.struct_mutex); - - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { int i; for (i = 0; i < 16; i++) { @@ -417,7 +420,6 @@ static int igt_reset_nop(void *arg) intel_gt_reset(gt, ALL_ENGINES, NULL); igt_global_reset_unlock(gt); - mutex_unlock(>->i915->drm.struct_mutex); if (intel_gt_is_wedged(gt)) { err = -EIO; break; @@ -429,16 +431,13 @@ static int igt_reset_nop(void *arg) break; } - err = igt_flush_test(gt->i915, 0); + err = igt_flush_test(gt->i915); if (err) break; } while (time_before(jiffies, end_time)); pr_info("%s: %d resets\n", __func__, count); - mutex_lock(>->i915->drm.struct_mutex); - err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); - mutex_unlock(>->i915->drm.struct_mutex); - + err = igt_flush_test(gt->i915); out: mock_file_free(gt->i915, file); if (intel_gt_is_wedged(gt)) @@ -458,23 +457,21 @@ static int igt_reset_nop_engine(void *arg) /* Check that we can engine-reset during non-user portions */ - if (!intel_has_reset_engine(gt->i915)) + if (!intel_has_reset_engine(gt)) return 0; file = mock_file(gt->i915); if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(>->i915->drm.struct_mutex); ctx = live_context(gt->i915, file); - mutex_unlock(>->i915->drm.struct_mutex); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } i915_gem_context_clear_bannable(ctx); - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { unsigned int reset_count, reset_engine_count; unsigned int count; IGT_TIMEOUT(end_time); @@ -494,7 +491,6 @@ static int igt_reset_nop_engine(void *arg) break; } - mutex_lock(>->i915->drm.struct_mutex); for (i = 0; i < 16; i++) { struct i915_request *rq; @@ -507,7 +503,6 @@ static int igt_reset_nop_engine(void *arg) i915_request_add(rq); } err = intel_engine_reset(engine, NULL); - mutex_unlock(>->i915->drm.struct_mutex); if (err) { pr_err("i915_reset_engine failed\n"); break; @@ -533,15 +528,12 @@ static int igt_reset_nop_engine(void *arg) if (err) break; - err = igt_flush_test(gt->i915, 0); + err = igt_flush_test(gt->i915); if (err) break; } - mutex_lock(>->i915->drm.struct_mutex); - err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); - mutex_unlock(>->i915->drm.struct_mutex); - + err = igt_flush_test(gt->i915); out: mock_file_free(gt->i915, file); if (intel_gt_is_wedged(gt)) @@ -559,18 +551,16 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) /* Check that we can issue an 
engine reset on an idle engine (no-op) */ - if (!intel_has_reset_engine(gt->i915)) + if (!intel_has_reset_engine(gt)) return 0; if (active) { - mutex_lock(>->i915->drm.struct_mutex); err = hang_init(&h, gt); - mutex_unlock(>->i915->drm.struct_mutex); if (err) return err; } - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { unsigned int reset_count, reset_engine_count; IGT_TIMEOUT(end_time); @@ -593,17 +583,14 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) if (active) { struct i915_request *rq; - mutex_lock(>->i915->drm.struct_mutex); rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); - mutex_unlock(>->i915->drm.struct_mutex); break; } i915_request_get(rq); i915_request_add(rq); - mutex_unlock(>->i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(gt->i915->drm.dev); @@ -647,7 +634,7 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) if (err) break; - err = igt_flush_test(gt->i915, 0); + err = igt_flush_test(gt->i915); if (err) break; } @@ -655,11 +642,8 @@ static int __igt_reset_engine(struct intel_gt *gt, bool active) if (intel_gt_is_wedged(gt)) err = -EIO; - if (active) { - mutex_lock(>->i915->drm.struct_mutex); + if (active) hang_fini(&h); - mutex_unlock(>->i915->drm.struct_mutex); - } return err; } @@ -725,9 +709,7 @@ static int active_engine(void *data) return PTR_ERR(file); for (count = 0; count < ARRAY_SIZE(ctx); count++) { - mutex_lock(&engine->i915->drm.struct_mutex); ctx[count] = live_context(engine->i915, file); - mutex_unlock(&engine->i915->drm.struct_mutex); if (IS_ERR(ctx[count])) { err = PTR_ERR(ctx[count]); while (--count) @@ -741,10 +723,8 @@ static int active_engine(void *data) struct i915_request *old = rq[idx]; struct i915_request *new; - mutex_lock(&engine->i915->drm.struct_mutex); new = igt_request_alloc(ctx[idx], engine); if (IS_ERR(new)) { - mutex_unlock(&engine->i915->drm.struct_mutex); err = PTR_ERR(new); break; } @@ -755,7 +735,6 @@ static int active_engine(void *data) rq[idx] = i915_request_get(new); i915_request_add(new); - mutex_unlock(&engine->i915->drm.struct_mutex); err = active_request_put(old); if (err) @@ -791,13 +770,11 @@ static int __igt_reset_engines(struct intel_gt *gt, * with any other engine. 
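* (Background threads keep every other engine busy while the target engine is reset in a loop; the siblings' reset counts are expected to be unchanged afterwards, catching any collateral resets.)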
*/ - if (!intel_has_reset_engine(gt->i915)) + if (!intel_has_reset_engine(gt)) return 0; if (flags & TEST_ACTIVE) { - mutex_lock(>->i915->drm.struct_mutex); err = hang_init(&h, gt); - mutex_unlock(>->i915->drm.struct_mutex); if (err) return err; @@ -805,7 +782,7 @@ static int __igt_reset_engines(struct intel_gt *gt, h.ctx->sched.priority = 1024; } - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { struct active_engine threads[I915_NUM_ENGINES] = {}; unsigned long device = i915_reset_count(global); unsigned long count = 0, reported; @@ -823,7 +800,7 @@ static int __igt_reset_engines(struct intel_gt *gt, } memset(threads, 0, sizeof(threads)); - for_each_engine(other, gt->i915, tmp) { + for_each_engine(other, gt, tmp) { struct task_struct *tsk; threads[tmp].resets = @@ -849,23 +826,22 @@ static int __igt_reset_engines(struct intel_gt *gt, get_task_struct(tsk); } + yield(); /* start all threads before we begin */ + intel_engine_pm_get(engine); set_bit(I915_RESET_ENGINE + id, >->reset.flags); do { struct i915_request *rq = NULL; if (flags & TEST_ACTIVE) { - mutex_lock(>->i915->drm.struct_mutex); rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); - mutex_unlock(>->i915->drm.struct_mutex); break; } i915_request_get(rq); i915_request_add(rq); - mutex_unlock(>->i915->drm.struct_mutex); if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(gt->i915->drm.dev); @@ -940,7 +916,7 @@ static int __igt_reset_engines(struct intel_gt *gt, } unwind: - for_each_engine(other, gt->i915, tmp) { + for_each_engine(other, gt, tmp) { int ret; if (!threads[tmp].task) @@ -977,9 +953,7 @@ unwind: if (err) break; - mutex_lock(>->i915->drm.struct_mutex); - err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); - mutex_unlock(>->i915->drm.struct_mutex); + err = igt_flush_test(gt->i915); if (err) break; } @@ -987,11 +961,8 @@ unwind: if (intel_gt_is_wedged(gt)) err = -EIO; - if (flags & TEST_ACTIVE) { - mutex_lock(>->i915->drm.struct_mutex); + if (flags & TEST_ACTIVE) hang_fini(&h); - mutex_unlock(>->i915->drm.struct_mutex); - } return err; } @@ -1047,7 +1018,7 @@ static int igt_reset_wait(void *arg) { struct intel_gt *gt = arg; struct i915_gpu_error *global = >->i915->gpu_error; - struct intel_engine_cs *engine = gt->i915->engine[RCS0]; + struct intel_engine_cs *engine = gt->engine[RCS0]; struct i915_request *rq; unsigned int reset_count; struct hang h; @@ -1061,7 +1032,6 @@ static int igt_reset_wait(void *arg) igt_global_reset_lock(gt); - mutex_lock(>->i915->drm.struct_mutex); err = hang_init(&h, gt); if (err) goto unlock; @@ -1109,7 +1079,6 @@ out_rq: fini: hang_fini(&h); unlock: - mutex_unlock(>->i915->drm.struct_mutex); igt_global_reset_unlock(gt); if (intel_gt_is_wedged(gt)) @@ -1127,15 +1096,14 @@ static int evict_vma(void *data) { struct evict_vma *arg = data; struct i915_address_space *vm = arg->vma->vm; - struct drm_i915_private *i915 = vm->i915; struct drm_mm_node evict = arg->vma->node; int err; complete(&arg->completion); - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(&vm->mutex); err = i915_gem_evict_for_node(vm, &evict, 0); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(&vm->mutex); return err; } @@ -1143,39 +1111,33 @@ static int evict_vma(void *data) static int evict_fence(void *data) { struct evict_vma *arg = data; - struct drm_i915_private *i915 = arg->vma->vm->i915; int err; complete(&arg->completion); - mutex_lock(&i915->drm.struct_mutex); - /* Mark the fence register as dirty to force the mmio update. 
*/ err = i915_gem_object_set_tiling(arg->vma->obj, I915_TILING_Y, 512); if (err) { pr_err("Invalid Y-tiling settings; err:%d\n", err); - goto out_unlock; + return err; } err = i915_vma_pin(arg->vma, 0, 0, PIN_GLOBAL | PIN_MAPPABLE); if (err) { pr_err("Unable to pin vma for Y-tiled fence; err:%d\n", err); - goto out_unlock; + return err; } err = i915_vma_pin_fence(arg->vma); i915_vma_unpin(arg->vma); if (err) { pr_err("Unable to pin Y-tiled fence; err:%d\n", err); - goto out_unlock; + return err; } i915_vma_unpin_fence(arg->vma); -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); - - return err; + return 0; } static int __igt_reset_evict_vma(struct intel_gt *gt, @@ -1183,23 +1145,26 @@ static int __igt_reset_evict_vma(struct intel_gt *gt, int (*fn)(void *), unsigned int flags) { - struct intel_engine_cs *engine = gt->i915->engine[RCS0]; + struct intel_engine_cs *engine = gt->engine[RCS0]; struct drm_i915_gem_object *obj; struct task_struct *tsk = NULL; struct i915_request *rq; struct evict_vma arg; struct hang h; + unsigned int pin_flags; int err; + if (!gt->ggtt->num_fences && flags & EXEC_OBJECT_NEEDS_FENCE) + return 0; + if (!engine || !intel_engine_can_store_dword(engine)) return 0; /* Check that we can recover an unbind stuck on a hanging request */ - mutex_lock(>->i915->drm.struct_mutex); err = hang_init(&h, gt); if (err) - goto unlock; + return err; obj = i915_gem_object_create_internal(gt->i915, SZ_1M); if (IS_ERR(obj)) { @@ -1227,10 +1192,12 @@ static int __igt_reset_evict_vma(struct intel_gt *gt, goto out_obj; } - err = i915_vma_pin(arg.vma, 0, 0, - i915_vma_is_ggtt(arg.vma) ? - PIN_GLOBAL | PIN_MAPPABLE : - PIN_USER); + pin_flags = i915_vma_is_ggtt(arg.vma) ? PIN_GLOBAL : PIN_USER; + + if (flags & EXEC_OBJECT_NEEDS_FENCE) + pin_flags |= PIN_MAPPABLE; + + err = i915_vma_pin(arg.vma, 0, 0, pin_flags); if (err) { i915_request_add(rq); goto out_obj; @@ -1262,8 +1229,6 @@ static int __igt_reset_evict_vma(struct intel_gt *gt, if (err) goto out_rq; - mutex_unlock(>->i915->drm.struct_mutex); - if (!wait_until_running(&h, rq)) { struct drm_printer p = drm_info_printer(gt->i915->drm.dev); @@ -1312,16 +1277,12 @@ out_reset: put_task_struct(tsk); } - mutex_lock(>->i915->drm.struct_mutex); out_rq: i915_request_put(rq); out_obj: i915_gem_object_put(obj); fini: hang_fini(&h); -unlock: - mutex_unlock(>->i915->drm.struct_mutex); - if (intel_gt_is_wedged(gt)) return -EIO; @@ -1340,6 +1301,7 @@ static int igt_reset_evict_ppgtt(void *arg) { struct intel_gt *gt = arg; struct i915_gem_context *ctx; + struct i915_address_space *vm; struct drm_file *file; int err; @@ -1347,18 +1309,20 @@ static int igt_reset_evict_ppgtt(void *arg) if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(>->i915->drm.struct_mutex); ctx = live_context(gt->i915, file); - mutex_unlock(>->i915->drm.struct_mutex); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out; } err = 0; - if (ctx->vm) /* aliasing == global gtt locking, covered above */ - err = __igt_reset_evict_vma(gt, ctx->vm, + vm = i915_gem_context_get_vm_rcu(ctx); + if (!i915_is_ggtt(vm)) { + /* aliasing == global gtt locking, covered above */ + err = __igt_reset_evict_vma(gt, vm, evict_vma, EXEC_OBJECT_WRITE); + } + i915_vm_put(vm); out: mock_file_free(gt->i915, file); @@ -1379,7 +1343,7 @@ static int wait_for_others(struct intel_gt *gt, struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { if (engine == exclude) continue; @@ -1403,12 +1367,11 @@ static int igt_reset_queue(void *arg) 
igt_global_reset_lock(gt); - mutex_lock(>->i915->drm.struct_mutex); err = hang_init(&h, gt); if (err) goto unlock; - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { struct i915_request *prev; IGT_TIMEOUT(end_time); unsigned int count; @@ -1518,7 +1481,7 @@ static int igt_reset_queue(void *arg) i915_request_put(prev); - err = igt_flush_test(gt->i915, I915_WAIT_LOCKED); + err = igt_flush_test(gt->i915); if (err) break; } @@ -1526,7 +1489,6 @@ static int igt_reset_queue(void *arg) fini: hang_fini(&h); unlock: - mutex_unlock(>->i915->drm.struct_mutex); igt_global_reset_unlock(gt); if (intel_gt_is_wedged(gt)) @@ -1539,7 +1501,7 @@ static int igt_handle_error(void *arg) { struct intel_gt *gt = arg; struct i915_gpu_error *global = >->i915->gpu_error; - struct intel_engine_cs *engine = gt->i915->engine[RCS0]; + struct intel_engine_cs *engine = gt->engine[RCS0]; struct hang h; struct i915_request *rq; struct i915_gpu_state *error; @@ -1547,17 +1509,15 @@ static int igt_handle_error(void *arg) /* Check that we can issue a global GPU and engine reset */ - if (!intel_has_reset_engine(gt->i915)) + if (!intel_has_reset_engine(gt)) return 0; if (!engine || !intel_engine_can_store_dword(engine)) return 0; - mutex_lock(>->i915->drm.struct_mutex); - err = hang_init(&h, gt); if (err) - goto err_unlock; + return err; rq = hang_create_request(&h, engine); if (IS_ERR(rq)) { @@ -1581,8 +1541,6 @@ static int igt_handle_error(void *arg) goto err_request; } - mutex_unlock(>->i915->drm.struct_mutex); - /* Temporarily disable error capture */ error = xchg(&global->first_error, (void *)-1); @@ -1590,8 +1548,6 @@ static int igt_handle_error(void *arg) xchg(&global->first_error, error); - mutex_lock(>->i915->drm.struct_mutex); - if (rq->fence.error != -EIO) { pr_err("Guilty request not identified!\n"); err = -EINVAL; @@ -1602,8 +1558,6 @@ err_request: i915_request_put(rq); err_fini: hang_fini(&h); -err_unlock: - mutex_unlock(>->i915->drm.struct_mutex); return err; } @@ -1617,7 +1571,7 @@ static int __igt_atomic_reset_engine(struct intel_engine_cs *engine, GEM_TRACE("i915_reset_engine(%s:%s) under %s\n", engine->name, mode, p->name); - tasklet_disable_nosync(t); + tasklet_disable(t); p->critical_section_begin(); err = intel_engine_reset(engine, NULL); @@ -1689,14 +1643,13 @@ static int igt_reset_engines_atomic(void *arg) /* Check that the engines resets are usable from atomic context */ - if (!intel_has_reset_engine(gt->i915)) + if (!intel_has_reset_engine(gt)) return 0; if (USES_GUC_SUBMISSION(gt->i915)) return 0; igt_global_reset_lock(gt); - mutex_lock(>->i915->drm.struct_mutex); /* Flush any requests before we get started and check basics */ if (!igt_force_reset(gt)) @@ -1706,7 +1659,7 @@ static int igt_reset_engines_atomic(void *arg) struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { err = igt_atomic_reset_engine(engine, p); if (err) goto out; @@ -1716,9 +1669,7 @@ static int igt_reset_engines_atomic(void *arg) out: /* As we poke around the guts, do a full reset before continuing. 
*/ igt_force_reset(gt); - unlock: - mutex_unlock(>->i915->drm.struct_mutex); igt_global_reset_unlock(gt); return err; @@ -1743,27 +1694,19 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) }; struct intel_gt *gt = &i915->gt; intel_wakeref_t wakeref; - bool saved_hangcheck; int err; - if (!intel_has_gpu_reset(gt->i915)) + if (!intel_has_gpu_reset(gt)) return 0; if (intel_gt_is_wedged(gt)) return -EIO; /* we're long past hope of a successful reset */ - wakeref = intel_runtime_pm_get(>->i915->runtime_pm); - saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); - drain_delayed_work(>->hangcheck.work); /* flush param */ + wakeref = intel_runtime_pm_get(gt->uncore->rpm); err = intel_gt_live_subtests(tests, gt); - mutex_lock(>->i915->drm.struct_mutex); - igt_flush_test(gt->i915, I915_WAIT_LOCKED); - mutex_unlock(>->i915->drm.struct_mutex); - - i915_modparams.enable_hangcheck = saved_hangcheck; - intel_runtime_pm_put(>->i915->runtime_pm, wakeref); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c new file mode 100644 index 000000000000..fd3770e48ac7 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_llc.c @@ -0,0 +1,80 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include "intel_pm.h" /* intel_gpu_freq() */ +#include "selftest_llc.h" +#include "intel_rps.h" + +static int gen6_verify_ring_freq(struct intel_llc *llc) +{ + struct drm_i915_private *i915 = llc_to_gt(llc)->i915; + struct ia_constants consts; + intel_wakeref_t wakeref; + unsigned int gpu_freq; + int err = 0; + + wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm); + + if (!get_ia_constants(llc, &consts)) { + err = -ENODEV; + goto out_rpm; + } + + for (gpu_freq = consts.min_gpu_freq; + gpu_freq <= consts.max_gpu_freq; + gpu_freq++) { + struct intel_rps *rps = &llc_to_gt(llc)->rps; + + unsigned int ia_freq, ring_freq, found; + u32 val; + + calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq); + + val = gpu_freq; + if (sandybridge_pcode_read(i915, + GEN6_PCODE_READ_MIN_FREQ_TABLE, + &val, NULL)) { + pr_err("Failed to read freq table[%d], range [%d, %d]\n", + gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq); + err = -ENXIO; + break; + } + + found = (val >> 0) & 0xff; + if (found != ia_freq) { + pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected CPU freq, found %d, expected %d\n", + gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq, + intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? GEN9_FREQ_SCALER : 1)), + found, ia_freq); + err = -EINVAL; + break; + } + + found = (val >> 8) & 0xff; + if (found != ring_freq) { + pr_err("Min freq table(%d/[%d, %d]):%dMHz did not match expected ring freq, found %d, expected %d\n", + gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq, + intel_gpu_freq(rps, gpu_freq * (INTEL_GEN(i915) >= 9 ? 
GEN9_FREQ_SCALER : 1)), + found, ring_freq); + err = -EINVAL; + break; + } + } + +out_rpm: + intel_runtime_pm_put(llc_to_gt(llc)->uncore->rpm, wakeref); + return err; +} + +int st_llc_verify(struct intel_llc *llc) +{ + int err = 0; + + if (HAS_LLC(llc_to_gt(llc)->i915)) + err = gen6_verify_ring_freq(llc); + + return err; +} diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.h b/drivers/gpu/drm/i915/gt/selftest_llc.h new file mode 100644 index 000000000000..873f896e72f2 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/selftest_llc.h @@ -0,0 +1,14 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#ifndef SELFTEST_LLC_H +#define SELFTEST_LLC_H + +struct intel_llc; + +int st_llc_verify(struct intel_llc *llc); + +#endif /* SELFTEST_LLC_H */ diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index d791158988d6..eb71ac2f992c 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -7,6 +7,7 @@ #include <linux/prime_numbers.h> #include "gem/i915_gem_pm.h" +#include "gt/intel_engine_heartbeat.h" #include "gt/intel_reset.h" #include "i915_selftest.h" @@ -19,26 +20,52 @@ #include "gem/selftests/igt_gem_utils.h" #include "gem/selftests/mock_context.h" +#define CS_GPR(engine, n) ((engine)->mmio_base + 0x600 + (n) * 4) +#define NUM_GPR_DW (16 * 2) /* each GPR is 2 dwords */ + +static struct i915_vma *create_scratch(struct intel_gt *gt) +{ + struct drm_i915_gem_object *obj; + struct i915_vma *vma; + int err; + + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + i915_gem_object_set_cache_coherency(obj, I915_CACHING_CACHED); + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) { + i915_gem_object_put(obj); + return vma; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) { + i915_gem_object_put(obj); + return ERR_PTR(err); + } + + return vma; +} + static int live_sanitycheck(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct i915_gem_engines_iter it; struct i915_gem_context *ctx; struct intel_context *ce; struct igt_spinner spin; - intel_wakeref_t wakeref; int err = -ENOMEM; - if (!HAS_LOGICAL_RING_CONTEXTS(i915)) + if (!HAS_LOGICAL_RING_CONTEXTS(gt->i915)) return 0; - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - if (igt_spinner_init(&spin, &i915->gt)) - goto err_unlock; + if (igt_spinner_init(&spin, gt)) + return -ENOMEM; - ctx = kernel_context(i915); + ctx = kernel_context(gt->i915); if (!ctx) goto err_spin; @@ -55,13 +82,13 @@ static int live_sanitycheck(void *arg) if (!igt_wait_for_spinner(&spin, rq)) { GEM_TRACE("spinner failed to start\n"); GEM_TRACE_DUMP(); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto err_ctx; } igt_spinner_end(&spin); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + if (igt_flush_test(gt->i915)) { err = -EIO; goto err_ctx; } @@ -73,12 +100,175 @@ err_ctx: kernel_context_close(ctx); err_spin: igt_spinner_fini(&spin); -err_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); return err; } +static int live_unlite_restore(struct intel_gt *gt, int prio) +{ + struct intel_engine_cs *engine; + struct i915_gem_context *ctx; + enum intel_engine_id id; + struct igt_spinner spin; + int err = -ENOMEM; + + /* + * Check that we can correctly context switch between 2 instances + * on the same engine from the same parent context. 
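+ * (Both contexts' rings are pre-filled with POISON_INUSE and ce[1]'s RING_TAIL is wound to the middle of its ring, so a spurious lite-restore that applied ce[1]'s tail while ce[0] is still active would execute that poison; see the in-loop comment below.)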
+ */ + + if (igt_spinner_init(&spin, gt)) + return err; + + ctx = kernel_context(gt->i915); + if (!ctx) + goto err_spin; + + err = 0; + for_each_engine(engine, gt, id) { + struct intel_context *ce[2] = {}; + struct i915_request *rq[2]; + struct igt_live_test t; + int n; + + if (prio && !intel_engine_has_preemption(engine)) + continue; + + if (!intel_engine_can_store_dword(engine)) + continue; + + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { + err = -EIO; + break; + } + + for (n = 0; n < ARRAY_SIZE(ce); n++) { + struct intel_context *tmp; + + tmp = intel_context_create(ctx, engine); + if (IS_ERR(tmp)) { + err = PTR_ERR(tmp); + goto err_ce; + } + + err = intel_context_pin(tmp); + if (err) { + intel_context_put(tmp); + goto err_ce; + } + + /* + * Setup the pair of contexts such that if we + * lite-restore using the RING_TAIL from ce[1] it + * will execute garbage from ce[0]->ring. + */ + memset(tmp->ring->vaddr, + POISON_INUSE, /* IPEHR: 0x5a5a5a5a [hung!] */ + tmp->ring->vma->size); + + ce[n] = tmp; + } + GEM_BUG_ON(!ce[1]->ring->size); + intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2); + __execlists_update_reg_state(ce[1], engine); + + rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK); + if (IS_ERR(rq[0])) { + err = PTR_ERR(rq[0]); + goto err_ce; + } + + i915_request_get(rq[0]); + i915_request_add(rq[0]); + GEM_BUG_ON(rq[0]->postfix > ce[1]->ring->emit); + + if (!igt_wait_for_spinner(&spin, rq[0])) { + i915_request_put(rq[0]); + goto err_ce; + } + + rq[1] = i915_request_create(ce[1]); + if (IS_ERR(rq[1])) { + err = PTR_ERR(rq[1]); + i915_request_put(rq[0]); + goto err_ce; + } + + if (!prio) { + /* + * Ensure we do the switch to ce[1] on completion. + * + * rq[0] is already submitted, so this should reduce + * to a no-op (a wait on a request on the same engine + * uses the submit fence, not the completion fence), + * but it will install a dependency on rq[1] for rq[0] + * that will prevent the pair being reordered by + * timeslicing. 
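+ * (In the prio variant this ordering is enforced instead by bumping rq[1] through engine->schedule() further below, preempting the spinner.)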
+ */ + i915_request_await_dma_fence(rq[1], &rq[0]->fence); + } + + i915_request_get(rq[1]); + i915_request_add(rq[1]); + GEM_BUG_ON(rq[1]->postfix <= rq[0]->postfix); + i915_request_put(rq[0]); + + if (prio) { + struct i915_sched_attr attr = { + .priority = prio, + }; + + /* Alternatively preempt the spinner with ce[1] */ + engine->schedule(rq[1], &attr); + } + + /* And switch back to ce[0] for good measure */ + rq[0] = i915_request_create(ce[0]); + if (IS_ERR(rq[0])) { + err = PTR_ERR(rq[0]); + i915_request_put(rq[1]); + goto err_ce; + } + + i915_request_await_dma_fence(rq[0], &rq[1]->fence); + i915_request_get(rq[0]); + i915_request_add(rq[0]); + GEM_BUG_ON(rq[0]->postfix > rq[1]->postfix); + i915_request_put(rq[1]); + i915_request_put(rq[0]); + +err_ce: + tasklet_kill(&engine->execlists.tasklet); /* flush submission */ + igt_spinner_end(&spin); + for (n = 0; n < ARRAY_SIZE(ce); n++) { + if (IS_ERR_OR_NULL(ce[n])) + break; + + intel_context_unpin(ce[n]); + intel_context_put(ce[n]); + } + + if (igt_live_test_end(&t)) + err = -EIO; + if (err) + break; + } + + kernel_context_close(ctx); +err_spin: + igt_spinner_fini(&spin); + return err; +} + +static int live_unlite_switch(void *arg) +{ + return live_unlite_restore(arg, 0); +} + +static int live_unlite_preempt(void *arg) +{ + return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX)); +} + static int emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx) { @@ -131,7 +321,13 @@ semaphore_queue(struct intel_engine_cs *engine, struct i915_vma *vma, int idx) if (IS_ERR(rq)) goto out_ctx; - err = emit_semaphore_chain(rq, vma, idx); + err = 0; + if (rq->engine->emit_init_breadcrumb) + err = rq->engine->emit_init_breadcrumb(rq); + if (err == 0) + err = emit_semaphore_chain(rq, vma, idx); + if (err == 0) + i915_request_get(rq); i915_request_add(rq); if (err) rq = ERR_PTR(err); @@ -144,10 +340,10 @@ out_ctx: static int release_queue(struct intel_engine_cs *engine, struct i915_vma *vma, - int idx) + int idx, int prio) { struct i915_sched_attr attr = { - .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), + .priority = prio, }; struct i915_request *rq; u32 *cs; @@ -168,9 +364,15 @@ release_queue(struct intel_engine_cs *engine, *cs++ = 1; intel_ring_advance(rq, cs); + + i915_request_get(rq); i915_request_add(rq); + local_bh_disable(); engine->schedule(rq, &attr); + local_bh_enable(); /* kick tasklet */ + + i915_request_put(rq); return 0; } @@ -189,8 +391,7 @@ slice_semaphore_queue(struct intel_engine_cs *outer, if (IS_ERR(head)) return PTR_ERR(head); - i915_request_get(head); - for_each_engine(engine, outer->i915, id) { + for_each_engine(engine, outer->gt, id) { for (i = 0; i < count; i++) { struct i915_request *rq; @@ -199,15 +400,16 @@ slice_semaphore_queue(struct intel_engine_cs *outer, err = PTR_ERR(rq); goto out; } + + i915_request_put(rq); } } - err = release_queue(outer, vma, n); + err = release_queue(outer, vma, n, INT_MAX); if (err) goto out; - if (i915_request_wait(head, - I915_WAIT_LOCKED, + if (i915_request_wait(head, 0, 2 * RUNTIME_INFO(outer->i915)->num_engines * (count + 2) * (count + 3)) < 0) { pr_err("Failed to slice along semaphore chain of length (%d, %d)!\n", count, n); @@ -223,9 +425,8 @@ out: static int live_timeslice_preempt(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct drm_i915_gem_object *obj; - intel_wakeref_t wakeref; struct i915_vma *vma; void *vaddr; int err = 0; @@ -239,17 +440,14 @@ static int live_timeslice_preempt(void *arg) * need to preempt the 
current task and replace it with another * ready task. */ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return 0; - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto err_unlock; - } + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; @@ -269,7 +467,7 @@ static int live_timeslice_preempt(void *arg) struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { if (!intel_engine_has_preemption(engine)) continue; @@ -279,7 +477,7 @@ static int live_timeslice_preempt(void *arg) if (err) goto err_pin; - if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + if (igt_flush_test(gt->i915)) { err = -EIO; goto err_pin; } @@ -292,22 +490,168 @@ err_map: i915_gem_object_unpin_map(obj); err_obj: i915_gem_object_put(obj); -err_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); + return err; +} + +static struct i915_request *nop_request(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + return rq; + + i915_request_get(rq); + i915_request_add(rq); + return rq; +} + +static void wait_for_submit(struct intel_engine_cs *engine, + struct i915_request *rq) +{ + do { + cond_resched(); + intel_engine_flush_submission(engine); + } while (!i915_request_is_active(rq)); +} + +static long timeslice_threshold(const struct intel_engine_cs *engine) +{ + return 2 * msecs_to_jiffies_timeout(timeslice(engine)) + 1; +} + +static int live_timeslice_queue(void *arg) +{ + struct intel_gt *gt = arg; + struct drm_i915_gem_object *obj; + struct intel_engine_cs *engine; + enum intel_engine_id id; + struct i915_vma *vma; + void *vaddr; + int err = 0; + + /* + * Make sure that even if ELSP[0] and ELSP[1] are filled with + * timeslicing between them disabled, we *do* enable timeslicing + * if the queue demands it. (Normally, we do not submit if + * ELSP[1] is already occupied, so must rely on timeslicing to + * eject ELSP[0] in favour of the queue.) 
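+ * (Below, ELSP[0] is filled with a semaphore wait boosted to maximum user priority, ELSP[1] with a plain nop, and the semaphore release is then queued at the waiter's own effective priority; the test expects the timeslice timer to be armed and the wait to finish within timeslice_threshold().)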
+ */ + if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION)) + return 0; + + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto err_obj; + } + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto err_obj; + } + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL); + if (err) + goto err_map; + + for_each_engine(engine, gt, id) { + struct i915_sched_attr attr = { + .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), + }; + struct i915_request *rq, *nop; + + if (!intel_engine_has_preemption(engine)) + continue; + + memset(vaddr, 0, PAGE_SIZE); + + /* ELSP[0]: semaphore wait */ + rq = semaphore_queue(engine, vma, 0); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_pin; + } + engine->schedule(rq, &attr); + wait_for_submit(engine, rq); + + /* ELSP[1]: nop request */ + nop = nop_request(engine); + if (IS_ERR(nop)) { + err = PTR_ERR(nop); + i915_request_put(rq); + goto err_pin; + } + wait_for_submit(engine, nop); + i915_request_put(nop); + + GEM_BUG_ON(i915_request_completed(rq)); + GEM_BUG_ON(execlists_active(&engine->execlists) != rq); + + /* Queue: semaphore signal, matching priority as semaphore */ + err = release_queue(engine, vma, 1, effective_prio(rq)); + if (err) { + i915_request_put(rq); + goto err_pin; + } + + intel_engine_flush_submission(engine); + if (!READ_ONCE(engine->execlists.timer.expires) && + !i915_request_completed(rq)) { + struct drm_printer p = + drm_info_printer(gt->i915->drm.dev); + + GEM_TRACE_ERR("%s: Failed to enable timeslicing!\n", + engine->name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + GEM_TRACE_DUMP(); + + memset(vaddr, 0xff, PAGE_SIZE); + err = -EINVAL; + } + + /* Timeslice every jiffy, so within 2 we should signal */ + if (i915_request_wait(rq, 0, timeslice_threshold(engine)) < 0) { + struct drm_printer p = + drm_info_printer(gt->i915->drm.dev); + + pr_err("%s: Failed to timeslice into queue\n", + engine->name); + intel_engine_dump(engine, &p, + "%s\n", engine->name); + + memset(vaddr, 0xff, PAGE_SIZE); + err = -EIO; + } + i915_request_put(rq); + if (err) + break; + } + +err_pin: + i915_vma_unpin(vma); +err_map: + i915_gem_object_unpin_map(obj); +err_obj: + i915_gem_object_put(obj); return err; } static int live_busywait_preempt(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct i915_gem_context *ctx_hi, *ctx_lo; struct intel_engine_cs *engine; struct drm_i915_gem_object *obj; struct i915_vma *vma; enum intel_engine_id id; - intel_wakeref_t wakeref; int err = -ENOMEM; u32 *map; @@ -316,22 +660,19 @@ static int live_busywait_preempt(void *arg) * preempt the busywaits used to synchronise between rings. 
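* (ctx_hi is given the maximum and ctx_lo the minimum user priority; if the low-priority busywait has not completed within HZ/5 the engine state is dumped and the GT is wedged.)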
*/ - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - ctx_hi = kernel_context(i915); + ctx_hi = kernel_context(gt->i915); if (!ctx_hi) - goto err_unlock; + return -ENOMEM; ctx_hi->sched.priority = I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); - ctx_lo = kernel_context(i915); + ctx_lo = kernel_context(gt->i915); if (!ctx_lo) goto err_ctx_hi; ctx_lo->sched.priority = I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); - obj = i915_gem_object_create_internal(i915, PAGE_SIZE); + obj = i915_gem_object_create_internal(gt->i915, PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto err_ctx_lo; @@ -343,7 +684,7 @@ static int live_busywait_preempt(void *arg) goto err_obj; } - vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL); + vma = i915_vma_instance(obj, >->ggtt->vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_map; @@ -353,7 +694,7 @@ static int live_busywait_preempt(void *arg) if (err) goto err_map; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct i915_request *lo, *hi; struct igt_live_test t; u32 *cs; @@ -364,7 +705,7 @@ static int live_busywait_preempt(void *arg) if (!intel_engine_can_store_dword(engine)) continue; - if (igt_live_test_begin(&t, i915, __func__, engine->name)) { + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { err = -EIO; goto err_vma; } @@ -444,7 +785,7 @@ static int live_busywait_preempt(void *arg) i915_request_add(hi); if (i915_request_wait(lo, 0, HZ / 5) < 0) { - struct drm_printer p = drm_info_printer(i915->drm.dev); + struct drm_printer p = drm_info_printer(gt->i915->drm.dev); pr_err("%s: Failed to preempt semaphore busywait!\n", engine->name); @@ -452,7 +793,7 @@ static int live_busywait_preempt(void *arg) intel_engine_dump(engine, &p, "%s\n", engine->name); GEM_TRACE_DUMP(); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto err_vma; } @@ -475,9 +816,6 @@ err_ctx_lo: kernel_context_close(ctx_lo); err_ctx_hi: kernel_context_close(ctx_hi); -err_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -501,49 +839,45 @@ spinner_create_request(struct igt_spinner *spin, static int live_preempt(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct i915_gem_context *ctx_hi, *ctx_lo; struct igt_spinner spin_hi, spin_lo; struct intel_engine_cs *engine; enum intel_engine_id id; - intel_wakeref_t wakeref; int err = -ENOMEM; - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) return 0; - if (!(i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) + if (!(gt->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION)) pr_err("Logical preemption supported, but not exposed\n"); - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - if (igt_spinner_init(&spin_hi, &i915->gt)) - goto err_unlock; + if (igt_spinner_init(&spin_hi, gt)) + return -ENOMEM; - if (igt_spinner_init(&spin_lo, &i915->gt)) + if (igt_spinner_init(&spin_lo, gt)) goto err_spin_hi; - ctx_hi = kernel_context(i915); + ctx_hi = kernel_context(gt->i915); if (!ctx_hi) goto err_spin_lo; ctx_hi->sched.priority = I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); - ctx_lo = kernel_context(i915); + ctx_lo = kernel_context(gt->i915); if (!ctx_lo) goto err_ctx_hi; ctx_lo->sched.priority = I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct 
igt_live_test t; struct i915_request *rq; if (!intel_engine_has_preemption(engine)) continue; - if (igt_live_test_begin(&t, i915, __func__, engine->name)) { + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { err = -EIO; goto err_ctx_lo; } @@ -559,7 +893,7 @@ static int live_preempt(void *arg) if (!igt_wait_for_spinner(&spin_lo, rq)) { GEM_TRACE("lo spinner failed to start\n"); GEM_TRACE_DUMP(); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto err_ctx_lo; } @@ -576,7 +910,7 @@ static int live_preempt(void *arg) if (!igt_wait_for_spinner(&spin_hi, rq)) { GEM_TRACE("hi spinner failed to start\n"); GEM_TRACE_DUMP(); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto err_ctx_lo; } @@ -599,54 +933,47 @@ err_spin_lo: igt_spinner_fini(&spin_lo); err_spin_hi: igt_spinner_fini(&spin_hi); -err_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); return err; } static int live_late_preempt(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct i915_gem_context *ctx_hi, *ctx_lo; struct igt_spinner spin_hi, spin_lo; struct intel_engine_cs *engine; struct i915_sched_attr attr = {}; enum intel_engine_id id; - intel_wakeref_t wakeref; int err = -ENOMEM; - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) return 0; - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - if (igt_spinner_init(&spin_hi, &i915->gt)) - goto err_unlock; + if (igt_spinner_init(&spin_hi, gt)) + return -ENOMEM; - if (igt_spinner_init(&spin_lo, &i915->gt)) + if (igt_spinner_init(&spin_lo, gt)) goto err_spin_hi; - ctx_hi = kernel_context(i915); + ctx_hi = kernel_context(gt->i915); if (!ctx_hi) goto err_spin_lo; - ctx_lo = kernel_context(i915); + ctx_lo = kernel_context(gt->i915); if (!ctx_lo) goto err_ctx_hi; /* Make sure ctx_lo stays before ctx_hi until we trigger preemption. 
*/ ctx_lo->sched.priority = I915_USER_PRIORITY(1); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct igt_live_test t; struct i915_request *rq; if (!intel_engine_has_preemption(engine)) continue; - if (igt_live_test_begin(&t, i915, __func__, engine->name)) { + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { err = -EIO; goto err_ctx_lo; } @@ -705,15 +1032,12 @@ err_spin_lo: igt_spinner_fini(&spin_lo); err_spin_hi: igt_spinner_fini(&spin_hi); -err_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); return err; err_wedged: igt_spinner_end(&spin_hi); igt_spinner_end(&spin_lo); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto err_ctx_lo; } @@ -723,14 +1047,13 @@ struct preempt_client { struct i915_gem_context *ctx; }; -static int preempt_client_init(struct drm_i915_private *i915, - struct preempt_client *c) +static int preempt_client_init(struct intel_gt *gt, struct preempt_client *c) { - c->ctx = kernel_context(i915); + c->ctx = kernel_context(gt->i915); if (!c->ctx) return -ENOMEM; - if (igt_spinner_init(&c->spin, &i915->gt)) + if (igt_spinner_init(&c->spin, gt)) goto err_ctx; return 0; @@ -748,11 +1071,10 @@ static void preempt_client_fini(struct preempt_client *c) static int live_nopreempt(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_engine_cs *engine; struct preempt_client a, b; enum intel_engine_id id; - intel_wakeref_t wakeref; int err = -ENOMEM; /* @@ -760,19 +1082,16 @@ static int live_nopreempt(void *arg) * that may be being observed and not want to be interrupted. */ - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) return 0; - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - if (preempt_client_init(i915, &a)) - goto err_unlock; - if (preempt_client_init(i915, &b)) + if (preempt_client_init(gt, &a)) + return -ENOMEM; + if (preempt_client_init(gt, &b)) goto err_client_a; b.ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct i915_request *rq_a, *rq_b; if (!intel_engine_has_preemption(engine)) @@ -832,7 +1151,7 @@ static int live_nopreempt(void *arg) goto err_wedged; } - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(gt->i915)) goto err_wedged; } @@ -841,29 +1160,344 @@ err_client_b: preempt_client_fini(&b); err_client_a: preempt_client_fini(&a); -err_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); return err; err_wedged: igt_spinner_end(&b.spin); igt_spinner_end(&a.spin); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto err_client_b; } +struct live_preempt_cancel { + struct intel_engine_cs *engine; + struct preempt_client a, b; +}; + +static int __cancel_active0(struct live_preempt_cancel *arg) +{ + struct i915_request *rq; + struct igt_live_test t; + int err; + + /* Preempt cancel of ELSP0 */ + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + if (igt_live_test_begin(&t, arg->engine->i915, + __func__, arg->engine->name)) + return -EIO; + + clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags); + rq = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_get(rq); + i915_request_add(rq); + if (!igt_wait_for_spinner(&arg->a.spin, rq)) { + err = -EIO; + goto out; + } + + 
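+ /* Ban the spinner's context, then use a heartbeat pulse to kick it off the HW; the cancelled request should promptly complete with fence.error == -EIO, as checked below. */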
i915_gem_context_set_banned(arg->a.ctx); + err = intel_engine_pulse(arg->engine); + if (err) + goto out; + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -EIO; + goto out; + } + + if (rq->fence.error != -EIO) { + pr_err("Cancelled inflight0 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq); + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} + +static int __cancel_active1(struct live_preempt_cancel *arg) +{ + struct i915_request *rq[2] = {}; + struct igt_live_test t; + int err; + + /* Preempt cancel of ELSP1 */ + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + if (igt_live_test_begin(&t, arg->engine->i915, + __func__, arg->engine->name)) + return -EIO; + + clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags); + rq[0] = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_NOOP); /* no preemption */ + if (IS_ERR(rq[0])) + return PTR_ERR(rq[0]); + + i915_request_get(rq[0]); + i915_request_add(rq[0]); + if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { + err = -EIO; + goto out; + } + + clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags); + rq[1] = spinner_create_request(&arg->b.spin, + arg->b.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq[1])) { + err = PTR_ERR(rq[1]); + goto out; + } + + i915_request_get(rq[1]); + err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); + i915_request_add(rq[1]); + if (err) + goto out; + + i915_gem_context_set_banned(arg->b.ctx); + err = intel_engine_pulse(arg->engine); + if (err) + goto out; + + igt_spinner_end(&arg->a.spin); + if (i915_request_wait(rq[1], 0, HZ / 5) < 0) { + err = -EIO; + goto out; + } + + if (rq[0]->fence.error != 0) { + pr_err("Normal inflight0 request did not complete\n"); + err = -EINVAL; + goto out; + } + + if (rq[1]->fence.error != -EIO) { + pr_err("Cancelled inflight1 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq[1]); + i915_request_put(rq[0]); + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} + +static int __cancel_queued(struct live_preempt_cancel *arg) +{ + struct i915_request *rq[3] = {}; + struct igt_live_test t; + int err; + + /* Full ELSP and one in the wings */ + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + if (igt_live_test_begin(&t, arg->engine->i915, + __func__, arg->engine->name)) + return -EIO; + + clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags); + rq[0] = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq[0])) + return PTR_ERR(rq[0]); + + i915_request_get(rq[0]); + i915_request_add(rq[0]); + if (!igt_wait_for_spinner(&arg->a.spin, rq[0])) { + err = -EIO; + goto out; + } + + clear_bit(CONTEXT_BANNED, &arg->b.ctx->flags); + rq[1] = igt_request_alloc(arg->b.ctx, arg->engine); + if (IS_ERR(rq[1])) { + err = PTR_ERR(rq[1]); + goto out; + } + + i915_request_get(rq[1]); + err = i915_request_await_dma_fence(rq[1], &rq[0]->fence); + i915_request_add(rq[1]); + if (err) + goto out; + + rq[2] = spinner_create_request(&arg->b.spin, + arg->a.ctx, arg->engine, + MI_ARB_CHECK); + if (IS_ERR(rq[2])) { + err = PTR_ERR(rq[2]); + goto out; + } + + i915_request_get(rq[2]); + err = i915_request_await_dma_fence(rq[2], &rq[1]->fence); + i915_request_add(rq[2]); + if (err) + goto out; + + i915_gem_context_set_banned(arg->a.ctx); + err = intel_engine_pulse(arg->engine); + if (err) + goto out; + + if (i915_request_wait(rq[2], 0, HZ / 5) < 0) { + err = -EIO; + goto out; + } + + if (rq[0]->fence.error != -EIO) { + pr_err("Cancelled inflight0 request did not 
report -EIO\n"); + err = -EINVAL; + goto out; + } + + if (rq[1]->fence.error != 0) { + pr_err("Normal inflight1 request did not complete\n"); + err = -EINVAL; + goto out; + } + + if (rq[2]->fence.error != -EIO) { + pr_err("Cancelled queued request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq[2]); + i915_request_put(rq[1]); + i915_request_put(rq[0]); + if (igt_live_test_end(&t)) + err = -EIO; + return err; +} + +static int __cancel_hostile(struct live_preempt_cancel *arg) +{ + struct i915_request *rq; + int err; + + /* Preempt cancel non-preemptible spinner in ELSP0 */ + if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + return 0; + + GEM_TRACE("%s(%s)\n", __func__, arg->engine->name); + clear_bit(CONTEXT_BANNED, &arg->a.ctx->flags); + rq = spinner_create_request(&arg->a.spin, + arg->a.ctx, arg->engine, + MI_NOOP); /* preemption disabled */ + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_get(rq); + i915_request_add(rq); + if (!igt_wait_for_spinner(&arg->a.spin, rq)) { + err = -EIO; + goto out; + } + + i915_gem_context_set_banned(arg->a.ctx); + err = intel_engine_pulse(arg->engine); /* force reset */ + if (err) + goto out; + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -EIO; + goto out; + } + + if (rq->fence.error != -EIO) { + pr_err("Cancelled inflight0 request did not report -EIO\n"); + err = -EINVAL; + goto out; + } + +out: + i915_request_put(rq); + if (igt_flush_test(arg->engine->i915)) + err = -EIO; + return err; +} + +static int live_preempt_cancel(void *arg) +{ + struct intel_gt *gt = arg; + struct live_preempt_cancel data; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * To cancel an inflight context, we need to first remove it from the + * GPU. That sounds like preemption! Plus a little bit of bookkeeping. + */ + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (preempt_client_init(gt, &data.a)) + return -ENOMEM; + if (preempt_client_init(gt, &data.b)) + goto err_client_a; + + for_each_engine(data.engine, gt, id) { + if (!intel_engine_has_preemption(data.engine)) + continue; + + err = __cancel_active0(&data); + if (err) + goto err_wedged; + + err = __cancel_active1(&data); + if (err) + goto err_wedged; + + err = __cancel_queued(&data); + if (err) + goto err_wedged; + + err = __cancel_hostile(&data); + if (err) + goto err_wedged; + } + + err = 0; +err_client_b: + preempt_client_fini(&data.b); +err_client_a: + preempt_client_fini(&data.a); + return err; + +err_wedged: + GEM_TRACE_DUMP(); + igt_spinner_end(&data.b.spin); + igt_spinner_end(&data.a.spin); + intel_gt_set_wedged(gt); + goto err_client_b; +} + static int live_suppress_self_preempt(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_engine_cs *engine; struct i915_sched_attr attr = { .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX) }; struct preempt_client a, b; enum intel_engine_id id; - intel_wakeref_t wakeref; int err = -ENOMEM; /* @@ -873,30 +1507,31 @@ static int live_suppress_self_preempt(void *arg) * completion event. 
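The __cancel_*() cases above all share one recipe: get the victim request running, ban its context, kick the engine with a heartbeat pulse, then insist the fence reports -EIO. A condensed sketch of that sequence, reusing only the calls visible in this hunk (error handling trimmed, not a new helper in the driver):

/* Sketch: cancel an in-flight spinner by banning its context (cf. __cancel_active0). */
static int cancel_inflight_sketch(struct preempt_client *c,
				  struct intel_engine_cs *engine)
{
	struct i915_request *rq;
	int err = 0;

	rq = spinner_create_request(&c->spin, c->ctx, engine, MI_ARB_CHECK);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);
	i915_request_add(rq);
	if (!igt_wait_for_spinner(&c->spin, rq)) {	/* wait until it is on HW */
		err = -EIO;
		goto out;
	}

	i915_gem_context_set_banned(c->ctx);		/* mark the context banned */
	err = intel_engine_pulse(engine);		/* heartbeat pulse evicts it */
	if (err)
		goto out;

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	else if (rq->fence.error != -EIO)		/* cancelled requests must end in -EIO */
		err = -EINVAL;
out:
	i915_request_put(rq);
	return err;
}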
*/ - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) return 0; - if (USES_GUC_SUBMISSION(i915)) + if (USES_GUC_SUBMISSION(gt->i915)) return 0; /* presume black blox */ - if (intel_vgpu_active(i915)) + if (intel_vgpu_active(gt->i915)) return 0; /* GVT forces single port & request submission */ - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - if (preempt_client_init(i915, &a)) - goto err_unlock; - if (preempt_client_init(i915, &b)) + if (preempt_client_init(gt, &a)) + return -ENOMEM; + if (preempt_client_init(gt, &b)) goto err_client_a; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct i915_request *rq_a, *rq_b; int depth; if (!intel_engine_has_preemption(engine)) continue; + if (igt_flush_test(gt->i915)) + goto err_wedged; + + intel_engine_pm_get(engine); engine->execlists.preempt_hang.count = 0; rq_a = spinner_create_request(&a.spin, @@ -904,12 +1539,14 @@ static int live_suppress_self_preempt(void *arg) MI_NOOP); if (IS_ERR(rq_a)) { err = PTR_ERR(rq_a); + intel_engine_pm_put(engine); goto err_client_b; } i915_request_add(rq_a); if (!igt_wait_for_spinner(&a.spin, rq_a)) { pr_err("First client failed to start\n"); + intel_engine_pm_put(engine); goto err_wedged; } @@ -921,6 +1558,7 @@ static int live_suppress_self_preempt(void *arg) MI_NOOP); if (IS_ERR(rq_b)) { err = PTR_ERR(rq_b); + intel_engine_pm_put(engine); goto err_client_b; } i915_request_add(rq_b); @@ -931,6 +1569,7 @@ static int live_suppress_self_preempt(void *arg) if (!igt_wait_for_spinner(&b.spin, rq_b)) { pr_err("Second client failed to start\n"); + intel_engine_pm_put(engine); goto err_wedged; } @@ -944,11 +1583,13 @@ static int live_suppress_self_preempt(void *arg) engine->name, engine->execlists.preempt_hang.count, depth); + intel_engine_pm_put(engine); err = -EINVAL; goto err_client_b; } - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + intel_engine_pm_put(engine); + if (igt_flush_test(gt->i915)) goto err_wedged; } @@ -957,15 +1598,12 @@ err_client_b: preempt_client_fini(&b); err_client_a: preempt_client_fini(&a); -err_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); return err; err_wedged: igt_spinner_end(&b.spin); igt_spinner_end(&a.spin); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto err_client_b; } @@ -984,9 +1622,13 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine) if (!rq) return NULL; - INIT_LIST_HEAD(&rq->active_list); rq->engine = engine; + spin_lock_init(&rq->lock); + INIT_LIST_HEAD(&rq->fence.cb_list); + rq->fence.lock = &rq->lock; + rq->fence.ops = &i915_fence_ops; + i915_sched_node_init(&rq->sched); /* mark this request as permanently incomplete */ @@ -1021,11 +1663,10 @@ static void dummy_request_free(struct i915_request *dummy) static int live_suppress_wait_preempt(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct preempt_client client[4]; struct intel_engine_cs *engine; enum intel_engine_id id; - intel_wakeref_t wakeref; int err = -ENOMEM; int i; @@ -1035,22 +1676,19 @@ static int live_suppress_wait_preempt(void *arg) * not needlessly generate preempt-to-idle cycles. 
*/ - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) return 0; - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - if (preempt_client_init(i915, &client[0])) /* ELSP[0] */ - goto err_unlock; - if (preempt_client_init(i915, &client[1])) /* ELSP[1] */ + if (preempt_client_init(gt, &client[0])) /* ELSP[0] */ + return -ENOMEM; + if (preempt_client_init(gt, &client[1])) /* ELSP[1] */ goto err_client_0; - if (preempt_client_init(i915, &client[2])) /* head of queue */ + if (preempt_client_init(gt, &client[2])) /* head of queue */ goto err_client_1; - if (preempt_client_init(i915, &client[3])) /* bystander */ + if (preempt_client_init(gt, &client[3])) /* bystander */ goto err_client_2; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { int depth; if (!intel_engine_has_preemption(engine)) @@ -1079,8 +1717,8 @@ static int live_suppress_wait_preempt(void *arg) } /* Disable NEWCLIENT promotion */ - __i915_active_request_set(&rq[i]->timeline->last_request, - dummy); + __i915_active_fence_set(&i915_request_timeline(rq[i])->last_request, + &dummy->fence); i915_request_add(rq[i]); } @@ -1105,7 +1743,7 @@ static int live_suppress_wait_preempt(void *arg) for (i = 0; i < ARRAY_SIZE(client); i++) igt_spinner_end(&client[i].spin); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(gt->i915)) goto err_wedged; if (engine->execlists.preempt_hang.count) { @@ -1128,26 +1766,22 @@ err_client_1: preempt_client_fini(&client[1]); err_client_0: preempt_client_fini(&client[0]); -err_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); return err; err_wedged: for (i = 0; i < ARRAY_SIZE(client); i++) igt_spinner_end(&client[i].spin); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto err_client_3; } static int live_chain_preempt(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_engine_cs *engine; struct preempt_client hi, lo; enum intel_engine_id id; - intel_wakeref_t wakeref; int err = -ENOMEM; /* @@ -1156,19 +1790,16 @@ static int live_chain_preempt(void *arg) * the previously submitted spinner in B. 
*/ - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) return 0; - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - if (preempt_client_init(i915, &hi)) - goto err_unlock; + if (preempt_client_init(gt, &hi)) + return -ENOMEM; - if (preempt_client_init(i915, &lo)) + if (preempt_client_init(gt, &lo)) goto err_client_hi; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct i915_sched_attr attr = { .priority = I915_USER_PRIORITY(I915_PRIORITY_MAX), }; @@ -1199,7 +1830,7 @@ static int live_chain_preempt(void *arg) goto err_wedged; } - if (igt_live_test_begin(&t, i915, __func__, engine->name)) { + if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) { err = -EIO; goto err_wedged; } @@ -1237,7 +1868,7 @@ static int live_chain_preempt(void *arg) igt_spinner_end(&hi.spin); if (i915_request_wait(rq, 0, HZ / 5) < 0) { struct drm_printer p = - drm_info_printer(i915->drm.dev); + drm_info_printer(gt->i915->drm.dev); pr_err("Failed to preempt over chain of %d\n", count); @@ -1253,7 +1884,7 @@ static int live_chain_preempt(void *arg) i915_request_add(rq); if (i915_request_wait(rq, 0, HZ / 5) < 0) { struct drm_printer p = - drm_info_printer(i915->drm.dev); + drm_info_printer(gt->i915->drm.dev); pr_err("Failed to flush low priority chain of %d requests\n", count); @@ -1274,57 +1905,50 @@ err_client_lo: preempt_client_fini(&lo); err_client_hi: preempt_client_fini(&hi); -err_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); return err; err_wedged: igt_spinner_end(&hi.spin); igt_spinner_end(&lo.spin); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto err_client_lo; } static int live_preempt_hang(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct i915_gem_context *ctx_hi, *ctx_lo; struct igt_spinner spin_hi, spin_lo; struct intel_engine_cs *engine; enum intel_engine_id id; - intel_wakeref_t wakeref; int err = -ENOMEM; - if (!HAS_LOGICAL_RING_PREEMPTION(i915)) + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) return 0; - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt)) return 0; - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - if (igt_spinner_init(&spin_hi, &i915->gt)) - goto err_unlock; + if (igt_spinner_init(&spin_hi, gt)) + return -ENOMEM; - if (igt_spinner_init(&spin_lo, &i915->gt)) + if (igt_spinner_init(&spin_lo, gt)) goto err_spin_hi; - ctx_hi = kernel_context(i915); + ctx_hi = kernel_context(gt->i915); if (!ctx_hi) goto err_spin_lo; ctx_hi->sched.priority = I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); - ctx_lo = kernel_context(i915); + ctx_lo = kernel_context(gt->i915); if (!ctx_lo) goto err_ctx_hi; ctx_lo->sched.priority = I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct i915_request *rq; if (!intel_engine_has_preemption(engine)) @@ -1341,7 +1965,7 @@ static int live_preempt_hang(void *arg) if (!igt_wait_for_spinner(&spin_lo, rq)) { GEM_TRACE("lo spinner failed to start\n"); GEM_TRACE_DUMP(); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto err_ctx_lo; } @@ -1363,28 +1987,28 @@ static int live_preempt_hang(void *arg) HZ / 10)) { pr_err("Preemption did not occur within timeout!"); GEM_TRACE_DUMP(); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto err_ctx_lo; } - 
set_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags); + set_bit(I915_RESET_ENGINE + id, &gt->reset.flags); intel_engine_reset(engine, NULL); - clear_bit(I915_RESET_ENGINE + id, &i915->gt.reset.flags); + clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags); engine->execlists.preempt_hang.inject_hang = false; if (!igt_wait_for_spinner(&spin_hi, rq)) { GEM_TRACE("hi spinner failed to start\n"); GEM_TRACE_DUMP(); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto err_ctx_lo; } igt_spinner_end(&spin_hi); igt_spinner_end(&spin_lo); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) { + if (igt_flush_test(gt->i915)) { err = -EIO; goto err_ctx_lo; } @@ -1399,9 +2023,105 @@ err_spin_lo: igt_spinner_fini(&spin_lo); err_spin_hi: igt_spinner_fini(&spin_hi); -err_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); + return err; +} + +static int live_preempt_timeout(void *arg) +{ + struct intel_gt *gt = arg; + struct i915_gem_context *ctx_hi, *ctx_lo; + struct igt_spinner spin_lo; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = -ENOMEM; + + /* + * Check that we force preemption to occur by cancelling the previous + * context if it refuses to yield the GPU. + */ + if (!IS_ACTIVE(CONFIG_DRM_I915_PREEMPT_TIMEOUT)) + return 0; + + if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915)) + return 0; + + if (!intel_has_reset_engine(gt)) + return 0; + + if (igt_spinner_init(&spin_lo, gt)) + return -ENOMEM; + + ctx_hi = kernel_context(gt->i915); + if (!ctx_hi) + goto err_spin_lo; + ctx_hi->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY); + + ctx_lo = kernel_context(gt->i915); + if (!ctx_lo) + goto err_ctx_hi; + ctx_lo->sched.priority = + I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY); + + for_each_engine(engine, gt, id) { + unsigned long saved_timeout; + struct i915_request *rq; + + if (!intel_engine_has_preemption(engine)) + continue; + + rq = spinner_create_request(&spin_lo, ctx_lo, engine, + MI_NOOP); /* preemption disabled */ + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + i915_request_add(rq); + if (!igt_wait_for_spinner(&spin_lo, rq)) { + intel_gt_set_wedged(gt); + err = -EIO; + goto err_ctx_lo; + } + + rq = igt_request_alloc(ctx_hi, engine); + if (IS_ERR(rq)) { + igt_spinner_end(&spin_lo); + err = PTR_ERR(rq); + goto err_ctx_lo; + } + + /* Flush the previous CS ack before changing timeouts */ + while (READ_ONCE(engine->execlists.pending[0])) + cpu_relax(); + + saved_timeout = engine->props.preempt_timeout_ms; + engine->props.preempt_timeout_ms = 1; /* in ms, -> 1 jiffie */ + + i915_request_get(rq); + i915_request_add(rq); + + intel_engine_flush_submission(engine); + engine->props.preempt_timeout_ms = saved_timeout; + + if (i915_request_wait(rq, 0, HZ / 10) < 0) { + intel_gt_set_wedged(gt); + i915_request_put(rq); + err = -ETIME; + goto err_ctx_lo; + } + + igt_spinner_end(&spin_lo); + i915_request_put(rq); + } + + err = 0; +err_ctx_lo: + kernel_context_close(ctx_lo); +err_ctx_hi: + kernel_context_close(ctx_hi); +err_spin_lo: + igt_spinner_fini(&spin_lo); return err; } @@ -1416,7 +2136,7 @@ static int random_priority(struct rnd_state *rnd) } struct preempt_smoke { - struct drm_i915_private *i915; + struct intel_gt *gt; struct i915_gem_context **contexts; struct intel_engine_cs *engine; struct drm_i915_gem_object *batch; @@ -1440,7 +2160,11 @@ static int smoke_submit(struct preempt_smoke *smoke, int err = 0; if (batch) { - vma = i915_vma_instance(batch, ctx->vm, NULL); + struct 
i915_address_space *vm; + + vm = i915_gem_context_get_vm_rcu(ctx); + vma = i915_vma_instance(batch, vm, NULL); + i915_vm_put(vm); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -1489,11 +2213,9 @@ static int smoke_crescendo_thread(void *arg) struct i915_gem_context *ctx = smoke_context(smoke); int err; - mutex_lock(&smoke->i915->drm.struct_mutex); err = smoke_submit(smoke, ctx, count % I915_PRIORITY_MAX, smoke->batch); - mutex_unlock(&smoke->i915->drm.struct_mutex); if (err) return err; @@ -1514,9 +2236,7 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) unsigned long count; int err = 0; - mutex_unlock(&smoke->i915->drm.struct_mutex); - - for_each_engine(engine, smoke->i915, id) { + for_each_engine(engine, smoke->gt, id) { arg[id] = *smoke; arg[id].engine = engine; if (!(flags & BATCH)) @@ -1532,8 +2252,10 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) get_task_struct(tsk[id]); } + yield(); /* start all threads before we kthread_stop() */ + count = 0; - for_each_engine(engine, smoke->i915, id) { + for_each_engine(engine, smoke->gt, id) { int status; if (IS_ERR_OR_NULL(tsk[id])) @@ -1548,11 +2270,9 @@ static int smoke_crescendo(struct preempt_smoke *smoke, unsigned int flags) put_task_struct(tsk[id]); } - mutex_lock(&smoke->i915->drm.struct_mutex); - pr_info("Submitted %lu crescendo:%x requests across %d engines and %d contexts\n", count, flags, - RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext); + RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext); return 0; } @@ -1564,7 +2284,7 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) count = 0; do { - for_each_engine(smoke->engine, smoke->i915, id) { + for_each_engine(smoke->engine, smoke->gt, id) { struct i915_gem_context *ctx = smoke_context(smoke); int err; @@ -1580,25 +2300,24 @@ static int smoke_random(struct preempt_smoke *smoke, unsigned int flags) pr_info("Submitted %lu random:%x requests across %d engines and %d contexts\n", count, flags, - RUNTIME_INFO(smoke->i915)->num_engines, smoke->ncontext); + RUNTIME_INFO(smoke->gt->i915)->num_engines, smoke->ncontext); return 0; } static int live_preempt_smoke(void *arg) { struct preempt_smoke smoke = { - .i915 = arg, + .gt = arg, .prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed), .ncontext = 1024, }; const unsigned int phase[] = { 0, BATCH }; - intel_wakeref_t wakeref; struct igt_live_test t; int err = -ENOMEM; u32 *cs; int n; - if (!HAS_LOGICAL_RING_PREEMPTION(smoke.i915)) + if (!HAS_LOGICAL_RING_PREEMPTION(smoke.gt->i915)) return 0; smoke.contexts = kmalloc_array(smoke.ncontext, @@ -1607,13 +2326,11 @@ static int live_preempt_smoke(void *arg) if (!smoke.contexts) return -ENOMEM; - mutex_lock(&smoke.i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm); - - smoke.batch = i915_gem_object_create_internal(smoke.i915, PAGE_SIZE); + smoke.batch = + i915_gem_object_create_internal(smoke.gt->i915, PAGE_SIZE); if (IS_ERR(smoke.batch)) { err = PTR_ERR(smoke.batch); - goto err_unlock; + goto err_free; } cs = i915_gem_object_pin_map(smoke.batch, I915_MAP_WB); @@ -1627,13 +2344,13 @@ static int live_preempt_smoke(void *arg) i915_gem_object_flush_map(smoke.batch); i915_gem_object_unpin_map(smoke.batch); - if (igt_live_test_begin(&t, smoke.i915, __func__, "all")) { + if (igt_live_test_begin(&t, smoke.gt->i915, __func__, "all")) { err = -EIO; goto err_batch; } for (n = 0; n < smoke.ncontext; n++) { - smoke.contexts[n] = kernel_context(smoke.i915); + smoke.contexts[n] = 
kernel_context(smoke.gt->i915); if (!smoke.contexts[n]) goto err_ctx; } @@ -1660,15 +2377,13 @@ err_ctx: err_batch: i915_gem_object_put(smoke.batch); -err_unlock: - intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref); - mutex_unlock(&smoke.i915->drm.struct_mutex); +err_free: kfree(smoke.contexts); return err; } -static int nop_virtual_engine(struct drm_i915_private *i915, +static int nop_virtual_engine(struct intel_gt *gt, struct intel_engine_cs **siblings, unsigned int nsibling, unsigned int nctx, @@ -1687,7 +2402,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915, GEM_BUG_ON(!nctx || nctx > ARRAY_SIZE(ctx)); for (n = 0; n < nctx; n++) { - ctx[n] = kernel_context(i915); + ctx[n] = kernel_context(gt->i915); if (!ctx[n]) { err = -ENOMEM; nctx = n; @@ -1712,7 +2427,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915, } } - err = igt_live_test_begin(&t, i915, __func__, ve[0]->engine->name); + err = igt_live_test_begin(&t, gt->i915, __func__, ve[0]->engine->name); if (err) goto out; @@ -1759,7 +2474,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915, request[nc]->fence.context, request[nc]->fence.seqno); GEM_TRACE_DUMP(); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); break; } } @@ -1781,7 +2496,7 @@ static int nop_virtual_engine(struct drm_i915_private *i915, prime, div64_u64(ktime_to_ns(times[1]), prime)); out: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(gt->i915)) err = -EIO; for (nc = 0; nc < nctx; nc++) { @@ -1794,25 +2509,22 @@ out: static int live_virtual_engine(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; struct intel_engine_cs *engine; - struct intel_gt *gt = &i915->gt; enum intel_engine_id id; unsigned int class, inst; - int err = -ENODEV; + int err; - if (USES_GUC_SUBMISSION(i915)) + if (USES_GUC_SUBMISSION(gt->i915)) return 0; - mutex_lock(&i915->drm.struct_mutex); - - for_each_engine(engine, i915, id) { - err = nop_virtual_engine(i915, &engine, 1, 1, 0); + for_each_engine(engine, gt, id) { + err = nop_virtual_engine(gt, &engine, 1, 1, 0); if (err) { pr_err("Failed to wrap engine %s: err=%d\n", engine->name, err); - goto out_unlock; + return err; } } @@ -1830,23 +2542,21 @@ static int live_virtual_engine(void *arg) continue; for (n = 1; n <= nsibling + 1; n++) { - err = nop_virtual_engine(i915, siblings, nsibling, + err = nop_virtual_engine(gt, siblings, nsibling, n, 0); if (err) - goto out_unlock; + return err; } - err = nop_virtual_engine(i915, siblings, nsibling, n, CHAIN); + err = nop_virtual_engine(gt, siblings, nsibling, n, CHAIN); if (err) - goto out_unlock; + return err; } -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); - return err; + return 0; } -static int mask_virtual_engine(struct drm_i915_private *i915, +static int mask_virtual_engine(struct intel_gt *gt, struct intel_engine_cs **siblings, unsigned int nsibling) { @@ -1862,7 +2572,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915, * restrict it to our desired engine within the virtual engine. 
*/ - ctx = kernel_context(i915); + ctx = kernel_context(gt->i915); if (!ctx) return -ENOMEM; @@ -1876,7 +2586,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915, if (err) goto out_put; - err = igt_live_test_begin(&t, i915, __func__, ve->engine->name); + err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name); if (err) goto out_unpin; @@ -1907,7 +2617,7 @@ static int mask_virtual_engine(struct drm_i915_private *i915, request[n]->fence.context, request[n]->fence.seqno); GEM_TRACE_DUMP(); - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); err = -EIO; goto out; } @@ -1922,11 +2632,8 @@ static int mask_virtual_engine(struct drm_i915_private *i915, } err = igt_live_test_end(&t); - if (err) - goto out; - out: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(gt->i915)) err = -EIO; for (n = 0; n < nsibling; n++) @@ -1943,17 +2650,14 @@ out_close: static int live_virtual_mask(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; - struct intel_gt *gt = &i915->gt; unsigned int class, inst; - int err = 0; + int err; - if (USES_GUC_SUBMISSION(i915)) + if (USES_GUC_SUBMISSION(gt->i915)) return 0; - mutex_lock(&i915->drm.struct_mutex); - for (class = 0; class <= MAX_ENGINE_CLASS; class++) { unsigned int nsibling; @@ -1967,17 +2671,166 @@ static int live_virtual_mask(void *arg) if (nsibling < 2) continue; - err = mask_virtual_engine(i915, siblings, nsibling); + err = mask_virtual_engine(gt, siblings, nsibling); if (err) - goto out_unlock; + return err; + } + + return 0; +} + +static int preserved_virtual_engine(struct intel_gt *gt, + struct intel_engine_cs **siblings, + unsigned int nsibling) +{ + struct i915_request *last = NULL; + struct i915_gem_context *ctx; + struct intel_context *ve; + struct i915_vma *scratch; + struct igt_live_test t; + unsigned int n; + int err = 0; + u32 *cs; + + ctx = kernel_context(gt->i915); + if (!ctx) + return -ENOMEM; + + scratch = create_scratch(siblings[0]->gt); + if (IS_ERR(scratch)) { + err = PTR_ERR(scratch); + goto out_close; } -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); + ve = intel_execlists_create_virtual(ctx, siblings, nsibling); + if (IS_ERR(ve)) { + err = PTR_ERR(ve); + goto out_scratch; + } + + err = intel_context_pin(ve); + if (err) + goto out_put; + + err = igt_live_test_begin(&t, gt->i915, __func__, ve->engine->name); + if (err) + goto out_unpin; + + for (n = 0; n < NUM_GPR_DW; n++) { + struct intel_engine_cs *engine = siblings[n % nsibling]; + struct i915_request *rq; + + rq = i915_request_create(ve); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out_end; + } + + i915_request_put(last); + last = i915_request_get(rq); + + cs = intel_ring_begin(rq, 8); + if (IS_ERR(cs)) { + i915_request_add(rq); + err = PTR_ERR(cs); + goto out_end; + } + + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = CS_GPR(engine, n); + *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32); + *cs++ = 0; + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = CS_GPR(engine, (n + 1) % NUM_GPR_DW); + *cs++ = n + 1; + + *cs++ = MI_NOOP; + intel_ring_advance(rq, cs); + + /* Restrict this request to run on a particular engine */ + rq->execution_mask = engine->mask; + i915_request_add(rq); + } + + if (i915_request_wait(last, 0, HZ / 5) < 0) { + err = -ETIME; + goto out_end; + } + + cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto out_end; + } + + for (n = 0; n < NUM_GPR_DW; n++) { + if 
(cs[n] != n) { + pr_err("Incorrect value[%d] found for GPR[%d]\n", + cs[n], n); + err = -EINVAL; + break; + } + } + + i915_gem_object_unpin_map(scratch->obj); + +out_end: + if (igt_live_test_end(&t)) + err = -EIO; + i915_request_put(last); +out_unpin: + intel_context_unpin(ve); +out_put: + intel_context_put(ve); +out_scratch: + i915_vma_unpin_and_release(&scratch, 0); +out_close: + kernel_context_close(ctx); return err; } -static int bond_virtual_engine(struct drm_i915_private *i915, +static int live_virtual_preserved(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; + unsigned int class, inst; + + /* + * Check that the context image retains non-privileged (user) registers + * from one engine to the next. For this we check that the CS_GPR + * are preserved. + */ + + if (USES_GUC_SUBMISSION(gt->i915)) + return 0; + + /* As we use CS_GPR we cannot run before they existed on all engines. */ + if (INTEL_GEN(gt->i915) < 9) + return 0; + + for (class = 0; class <= MAX_ENGINE_CLASS; class++) { + int nsibling, err; + + nsibling = 0; + for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) { + if (!gt->engine_class[class][inst]) + continue; + + siblings[nsibling++] = gt->engine_class[class][inst]; + } + if (nsibling < 2) + continue; + + err = preserved_virtual_engine(gt, siblings, nsibling); + if (err) + return err; + } + + return 0; +} + +static int bond_virtual_engine(struct intel_gt *gt, unsigned int class, struct intel_engine_cs **siblings, unsigned int nsibling, @@ -1993,13 +2846,13 @@ static int bond_virtual_engine(struct drm_i915_private *i915, GEM_BUG_ON(nsibling >= ARRAY_SIZE(rq) - 1); - ctx = kernel_context(i915); + ctx = kernel_context(gt->i915); if (!ctx) return -ENOMEM; err = 0; rq[0] = ERR_PTR(-ENOMEM); - for_each_engine(master, i915, id) { + for_each_engine(master, gt, id) { struct i915_sw_fence fence = {}; if (master->class == class) @@ -2104,7 +2957,7 @@ static int bond_virtual_engine(struct drm_i915_private *i915, out: for (n = 0; !IS_ERR(rq[n]); n++) i915_request_put(rq[n]); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(gt->i915)) err = -EIO; kernel_context_close(ctx); @@ -2121,17 +2974,14 @@ static int live_virtual_bond(void *arg) { "schedule", BOND_SCHEDULE }, { }, }; - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1]; - struct intel_gt *gt = &i915->gt; unsigned int class, inst; - int err = 0; + int err; - if (USES_GUC_SUBMISSION(i915)) + if (USES_GUC_SUBMISSION(gt->i915)) return 0; - mutex_lock(&i915->drm.struct_mutex); - for (class = 0; class <= MAX_ENGINE_CLASS; class++) { const struct phase *p; int nsibling; @@ -2148,38 +2998,42 @@ static int live_virtual_bond(void *arg) continue; for (p = phases; p->name; p++) { - err = bond_virtual_engine(i915, + err = bond_virtual_engine(gt, class, siblings, nsibling, p->flags); if (err) { pr_err("%s(%s): failed class=%d, nsibling=%d, err=%d\n", __func__, p->name, class, nsibling, err); - goto out_unlock; + return err; } } } -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); - return err; + return 0; } int intel_execlists_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(live_sanitycheck), + SUBTEST(live_unlite_switch), + SUBTEST(live_unlite_preempt), SUBTEST(live_timeslice_preempt), + SUBTEST(live_timeslice_queue), SUBTEST(live_busywait_preempt), SUBTEST(live_preempt), SUBTEST(live_late_preempt), SUBTEST(live_nopreempt), + 
SUBTEST(live_preempt_cancel), SUBTEST(live_suppress_self_preempt), SUBTEST(live_suppress_wait_preempt), SUBTEST(live_chain_preempt), SUBTEST(live_preempt_hang), + SUBTEST(live_preempt_timeout), SUBTEST(live_preempt_smoke), SUBTEST(live_virtual_engine), SUBTEST(live_virtual_mask), + SUBTEST(live_virtual_preserved), SUBTEST(live_virtual_bond), }; @@ -2189,5 +3043,512 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915) if (intel_gt_is_wedged(&i915->gt)) return 0; - return i915_live_subtests(tests, i915); + return intel_gt_live_subtests(tests, &i915->gt); +} + +static void hexdump(const void *buf, size_t len) +{ + const size_t rowsize = 8 * sizeof(u32); + const void *prev = NULL; + bool skip = false; + size_t pos; + + for (pos = 0; pos < len; pos += rowsize) { + char line[128]; + + if (prev && !memcmp(prev, buf + pos, rowsize)) { + if (!skip) { + pr_info("*\n"); + skip = true; + } + continue; + } + + WARN_ON_ONCE(hex_dump_to_buffer(buf + pos, len - pos, + rowsize, sizeof(u32), + line, sizeof(line), + false) >= sizeof(line)); + pr_info("[%04zx] %s\n", pos, line); + + prev = buf + pos; + skip = false; + } +} + +static int live_lrc_layout(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + u32 *mem; + int err; + + /* + * Check the registers offsets we use to create the initial reg state + * match the layout saved by HW. + */ + + mem = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!mem) + return -ENOMEM; + + err = 0; + for_each_engine(engine, gt, id) { + u32 *hw, *lrc; + int dw; + + if (!engine->default_state) + continue; + + hw = i915_gem_object_pin_map(engine->default_state, + I915_MAP_WB); + if (IS_ERR(hw)) { + err = PTR_ERR(hw); + break; + } + hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + + lrc = memset(mem, 0, PAGE_SIZE); + execlists_init_reg_state(lrc, + engine->kernel_context, + engine, + engine->kernel_context->ring, + true); + + dw = 0; + do { + u32 lri = hw[dw]; + + if (lri == 0) { + dw++; + continue; + } + + if ((lri & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) { + pr_err("%s: Expected LRI command at dword %d, found %08x\n", + engine->name, dw, lri); + err = -EINVAL; + break; + } + + if (lrc[dw] != lri) { + pr_err("%s: LRI command mismatch at dword %d, expected %08x found %08x\n", + engine->name, dw, lri, lrc[dw]); + err = -EINVAL; + break; + } + + lri &= 0x7f; + lri++; + dw++; + + while (lri) { + if (hw[dw] != lrc[dw]) { + pr_err("%s: Different registers found at dword %d, expected %x, found %x\n", + engine->name, dw, hw[dw], lrc[dw]); + err = -EINVAL; + break; + } + + /* + * Skip over the actual register value as we + * expect that to differ. + */ + dw += 2; + lri -= 2; + } + } while ((lrc[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END); + + if (err) { + pr_info("%s: HW register image:\n", engine->name); + hexdump(hw, PAGE_SIZE); + + pr_info("%s: SW register image:\n", engine->name); + hexdump(lrc, PAGE_SIZE); + } + + i915_gem_object_unpin_map(engine->default_state); + if (err) + break; + } + + kfree(mem); + return err; +} + +static int find_offset(const u32 *lri, u32 offset) +{ + int i; + + for (i = 0; i < PAGE_SIZE / sizeof(u32); i++) + if (lri[i] == offset) + return i; + + return -1; +} + +static int live_lrc_fixed(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; + int err = 0; + + /* + * Check the assumed register offsets match the actual locations in + * the context image. 
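live_lrc_layout() above walks the HW-saved default image as a stream of MI_LOAD_REGISTER_IMM packets; the header decode it relies on can be summarised in a small helper (illustrative sketch mirroring the masks used in that loop, not code from the driver):

/* Sketch: how many payload dwords follow one LRI header in the context image. */
static int lri_payload_dwords(u32 header)
{
	/* Opcode field (bits 31:23) must be MI_LOAD_REGISTER_IMM, i.e. MI_INSTR(0x22, 0). */
	if ((header & GENMASK(31, 23)) != MI_INSTR(0x22, 0))
		return -EINVAL;

	/*
	 * The low bits carry 2*N - 1 for N register/value pairs, so the payload
	 * after the header is (header & 0x7f) + 1 dwords - exactly how far the
	 * walker above advances before expecting the next LRI or BB_END.
	 */
	return (header & 0x7f) + 1;
}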
+ */ + + for_each_engine(engine, gt, id) { + const struct { + u32 reg; + u32 offset; + const char *name; + } tbl[] = { + { + i915_mmio_reg_offset(RING_START(engine->mmio_base)), + CTX_RING_BUFFER_START - 1, + "RING_START" + }, + { + i915_mmio_reg_offset(RING_CTL(engine->mmio_base)), + CTX_RING_BUFFER_CONTROL - 1, + "RING_CTL" + }, + { + i915_mmio_reg_offset(RING_HEAD(engine->mmio_base)), + CTX_RING_HEAD - 1, + "RING_HEAD" + }, + { + i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)), + CTX_RING_TAIL - 1, + "RING_TAIL" + }, + { + i915_mmio_reg_offset(RING_MI_MODE(engine->mmio_base)), + lrc_ring_mi_mode(engine), + "RING_MI_MODE" + }, + { + engine->mmio_base + 0x110, + CTX_BB_STATE - 1, + "BB_STATE" + }, + { }, + }, *t; + u32 *hw; + + if (!engine->default_state) + continue; + + hw = i915_gem_object_pin_map(engine->default_state, + I915_MAP_WB); + if (IS_ERR(hw)) { + err = PTR_ERR(hw); + break; + } + hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw); + + for (t = tbl; t->name; t++) { + int dw = find_offset(hw, t->reg); + + if (dw != t->offset) { + pr_err("%s: Offset for %s [0x%x] mismatch, found %x, expected %x\n", + engine->name, + t->name, + t->reg, + dw, + t->offset); + err = -EINVAL; + } + } + + i915_gem_object_unpin_map(engine->default_state); + } + + return err; +} + +static int __live_lrc_state(struct i915_gem_context *fixme, + struct intel_engine_cs *engine, + struct i915_vma *scratch) +{ + struct intel_context *ce; + struct i915_request *rq; + enum { + RING_START_IDX = 0, + RING_TAIL_IDX, + MAX_IDX + }; + u32 expected[MAX_IDX]; + u32 *cs; + int err; + int n; + + ce = intel_context_create(fixme, engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + err = intel_context_pin(ce); + if (err) + goto err_put; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_unpin; + } + + cs = intel_ring_begin(rq, 4 * MAX_IDX); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + i915_request_add(rq); + goto err_unpin; + } + + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = i915_mmio_reg_offset(RING_START(engine->mmio_base)); + *cs++ = i915_ggtt_offset(scratch) + RING_START_IDX * sizeof(u32); + *cs++ = 0; + + expected[RING_START_IDX] = i915_ggtt_offset(ce->ring->vma); + + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = i915_mmio_reg_offset(RING_TAIL(engine->mmio_base)); + *cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32); + *cs++ = 0; + + i915_request_get(rq); + i915_request_add(rq); + + intel_engine_flush_submission(engine); + expected[RING_TAIL_IDX] = ce->ring->tail; + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -ETIME; + goto err_rq; + } + + cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_rq; + } + + for (n = 0; n < MAX_IDX; n++) { + if (cs[n] != expected[n]) { + pr_err("%s: Stored register[%d] value[0x%x] did not match expected[0x%x]\n", + engine->name, n, cs[n], expected[n]); + err = -EINVAL; + break; + } + } + + i915_gem_object_unpin_map(scratch->obj); + +err_rq: + i915_request_put(rq); +err_unpin: + intel_context_unpin(ce); +err_put: + intel_context_put(ce); + return err; +} + +static int live_lrc_state(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *fixme; + struct i915_vma *scratch; + enum intel_engine_id id; + int err = 0; + + /* + * Check the live register state matches what we expect for this + * intel_context. 
+ */ + + fixme = kernel_context(gt->i915); + if (!fixme) + return -ENOMEM; + + scratch = create_scratch(gt); + if (IS_ERR(scratch)) { + err = PTR_ERR(scratch); + goto out_close; + } + + for_each_engine(engine, gt, id) { + err = __live_lrc_state(fixme, engine, scratch); + if (err) + break; + } + + if (igt_flush_test(gt->i915)) + err = -EIO; + + i915_vma_unpin_and_release(&scratch, 0); +out_close: + kernel_context_close(fixme); + return err; +} + +static int gpr_make_dirty(struct intel_engine_cs *engine) +{ + struct i915_request *rq; + u32 *cs; + int n; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + cs = intel_ring_begin(rq, 2 * NUM_GPR_DW + 2); + if (IS_ERR(cs)) { + i915_request_add(rq); + return PTR_ERR(cs); + } + + *cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW); + for (n = 0; n < NUM_GPR_DW; n++) { + *cs++ = CS_GPR(engine, n); + *cs++ = STACK_MAGIC; + } + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + i915_request_add(rq); + + return 0; +} + +static int __live_gpr_clear(struct i915_gem_context *fixme, + struct intel_engine_cs *engine, + struct i915_vma *scratch) +{ + struct intel_context *ce; + struct i915_request *rq; + u32 *cs; + int err; + int n; + + if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS) + return 0; /* GPR only on rcs0 for gen8 */ + + err = gpr_make_dirty(engine); + if (err) + return err; + + ce = intel_context_create(fixme, engine); + if (IS_ERR(ce)) + return PTR_ERR(ce); + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_put; + } + + cs = intel_ring_begin(rq, 4 * NUM_GPR_DW); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + i915_request_add(rq); + goto err_put; + } + + for (n = 0; n < NUM_GPR_DW; n++) { + *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT; + *cs++ = CS_GPR(engine, n); + *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32); + *cs++ = 0; + } + + i915_request_get(rq); + i915_request_add(rq); + + if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = -ETIME; + goto err_rq; + } + + cs = i915_gem_object_pin_map(scratch->obj, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_rq; + } + + for (n = 0; n < NUM_GPR_DW; n++) { + if (cs[n]) { + pr_err("%s: GPR[%d].%s was not zero, found 0x%08x!\n", + engine->name, + n / 2, n & 1 ? "udw" : "ldw", + cs[n]); + err = -EINVAL; + break; + } + } + + i915_gem_object_unpin_map(scratch->obj); + +err_rq: + i915_request_put(rq); +err_put: + intel_context_put(ce); + return err; +} + +static int live_gpr_clear(void *arg) +{ + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + struct i915_gem_context *fixme; + struct i915_vma *scratch; + enum intel_engine_id id; + int err = 0; + + /* + * Check that GPR registers are cleared in new contexts as we need + * to avoid leaking any information from previous contexts. 
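The ring reservations in the two GPR helpers above can be sanity-checked by counting dwords (a worked example only, no new driver code):

/*
 * gpr_make_dirty(): 1 dword for the MI_LOAD_REGISTER_IMM(NUM_GPR_DW) header,
 * plus an (offset, value) pair per GPR dword, plus the trailing MI_NOOP:
 *   1 + 2 * NUM_GPR_DW + 1 == 2 * NUM_GPR_DW + 2, matching intel_ring_begin().
 * __live_gpr_clear(): each MI_STORE_REGISTER_MEM_GEN8 consumes 4 dwords
 *   (opcode, register, address low, address high), hence 4 * NUM_GPR_DW.
 */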
+ */ + + fixme = kernel_context(gt->i915); + if (!fixme) + return -ENOMEM; + + scratch = create_scratch(gt); + if (IS_ERR(scratch)) { + err = PTR_ERR(scratch); + goto out_close; + } + + for_each_engine(engine, gt, id) { + err = __live_gpr_clear(fixme, engine, scratch); + if (err) + break; + } + + if (igt_flush_test(gt->i915)) + err = -EIO; + + i915_vma_unpin_and_release(&scratch, 0); +out_close: + kernel_context_close(fixme); + return err; +} + +int intel_lrc_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_lrc_layout), + SUBTEST(live_lrc_fixed), + SUBTEST(live_lrc_state), + SUBTEST(live_gpr_clear), + }; + + if (!HAS_LOGICAL_RING_CONTEXTS(i915)) + return 0; + + return intel_gt_live_subtests(tests, &i915->gt); } diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 00a4f60cdfd5..6ad6aca315f6 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -17,7 +17,7 @@ static int igt_global_reset(void *arg) /* Check that we can issue a global GPU reset */ igt_global_reset_lock(gt); - wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm); + wakeref = intel_runtime_pm_get(gt->uncore->rpm); reset_count = i915_reset_count(&gt->i915->gpu_error); @@ -28,7 +28,7 @@ static int igt_global_reset(void *arg) err = -EINVAL; } - intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); igt_global_reset_unlock(gt); if (intel_gt_is_wedged(gt)) @@ -45,14 +45,14 @@ static int igt_wedged_reset(void *arg) /* Check that we can recover a wedged device with a GPU reset */ igt_global_reset_lock(gt); - wakeref = intel_runtime_pm_get(&gt->i915->runtime_pm); + wakeref = intel_runtime_pm_get(gt->uncore->rpm); intel_gt_set_wedged(gt); GEM_BUG_ON(!intel_gt_is_wedged(gt)); intel_gt_reset(gt, ALL_ENGINES, NULL); - intel_runtime_pm_put(&gt->i915->runtime_pm, wakeref); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); igt_global_reset_unlock(gt); return intel_gt_is_wedged(gt) ? 
-EIO : 0; @@ -112,7 +112,7 @@ static int igt_atomic_engine_reset(void *arg) /* Check that the resets are usable from atomic context */ - if (!intel_has_reset_engine(gt->i915)) + if (!intel_has_reset_engine(gt)) return 0; if (USES_GUC_SUBMISSION(gt->i915)) @@ -125,8 +125,8 @@ static int igt_atomic_engine_reset(void *arg) if (!igt_force_reset(gt)) goto out_unlock; - for_each_engine(engine, gt->i915, id) { - tasklet_disable_nosync(&engine->execlists.tasklet); + for_each_engine(engine, gt, id) { + tasklet_disable(&engine->execlists.tasklet); intel_engine_pm_get(engine); for (p = igt_atomic_phases; p->name; p++) { @@ -170,7 +170,7 @@ int intel_reset_live_selftests(struct drm_i915_private *i915) }; struct intel_gt *gt = &i915->gt; - if (!intel_has_gpu_reset(gt->i915)) + if (!intel_has_gpu_reset(gt)) return 0; if (intel_gt_is_wedged(gt)) diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index 321481403165..f04a59fe5d2c 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -6,8 +6,10 @@ #include <linux/prime_numbers.h> -#include "gem/i915_gem_pm.h" +#include "intel_engine_pm.h" #include "intel_gt.h" +#include "intel_gt_requests.h" +#include "intel_ring.h" #include "../selftests/i915_random.h" #include "../i915_selftest.h" @@ -34,7 +36,7 @@ static unsigned long hwsp_cacheline(struct intel_timeline *tl) #define CACHELINES_PER_PAGE (PAGE_SIZE / CACHELINE_BYTES) struct mock_hwsp_freelist { - struct drm_i915_private *i915; + struct intel_gt *gt; struct radix_tree_root cachelines; struct intel_timeline **history; unsigned long count, max; @@ -67,7 +69,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state, unsigned long cacheline; int err; - tl = intel_timeline_create(&state->i915->gt, NULL); + tl = intel_timeline_create(state->gt, NULL); if (IS_ERR(tl)) return PTR_ERR(tl); @@ -105,6 +107,7 @@ static int __mock_hwsp_timeline(struct mock_hwsp_freelist *state, static int mock_hwsp_freelist(void *arg) { struct mock_hwsp_freelist state; + struct drm_i915_private *i915; const struct { const char *name; unsigned int flags; @@ -116,12 +119,14 @@ static int mock_hwsp_freelist(void *arg) unsigned int na; int err = 0; + i915 = mock_gem_device(); + if (!i915) + return -ENOMEM; + INIT_RADIX_TREE(&state.cachelines, GFP_KERNEL); state.prng = I915_RND_STATE_INITIALIZER(i915_selftest.random_seed); - state.i915 = mock_gem_device(); - if (!state.i915) - return -ENOMEM; + state.gt = &i915->gt; /* * Create a bunch of timelines and check that their HWSP do not overlap. 
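For the mock HWSP freelist below, the slot arithmetic is worth spelling out once (the concrete values are assumed typical defaults and are not part of this diff):

/*
 * Worked example: with 4 KiB pages and 64-byte cachelines,
 * CACHELINES_PER_PAGE = PAGE_SIZE / CACHELINE_BYTES = 4096 / 64 = 64 seqno
 * slots per HWSP page, so walking prime counts up to 2 * CACHELINES_PER_PAGE
 * (128) comfortably spills past a single page.
 */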
@@ -136,7 +141,6 @@ static int mock_hwsp_freelist(void *arg) goto err_put; } - mutex_lock(&state.i915->drm.struct_mutex); for (p = phases; p->name; p++) { pr_debug("%s(%s)\n", __func__, p->name); for_each_prime_number_from(na, 1, 2 * CACHELINES_PER_PAGE) { @@ -149,10 +153,9 @@ static int mock_hwsp_freelist(void *arg) out: for (na = 0; na < state.max; na++) __mock_hwsp_record(&state, na, NULL); - mutex_unlock(&state.i915->drm.struct_mutex); kfree(state.history); err_put: - drm_dev_put(&state.i915->drm); + drm_dev_put(&i915->drm); return err; } @@ -449,8 +452,6 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value) struct i915_request *rq; int err; - lockdep_assert_held(&tl->gt->i915->drm.struct_mutex); /* lazy rq refs */ - err = intel_timeline_pin(tl); if (err) { rq = ERR_PTR(err); @@ -461,10 +462,14 @@ tl_write(struct intel_timeline *tl, struct intel_engine_cs *engine, u32 value) if (IS_ERR(rq)) goto out_unpin; + i915_request_get(rq); + err = emit_ggtt_store_dw(rq, tl->hwsp_offset, value); i915_request_add(rq); - if (err) + if (err) { + i915_request_put(rq); rq = ERR_PTR(err); + } out_unpin: intel_timeline_unpin(tl); @@ -475,11 +480,11 @@ out: } static struct intel_timeline * -checked_intel_timeline_create(struct drm_i915_private *i915) +checked_intel_timeline_create(struct intel_gt *gt) { struct intel_timeline *tl; - tl = intel_timeline_create(&i915->gt, NULL); + tl = intel_timeline_create(gt, NULL); if (IS_ERR(tl)) return tl; @@ -496,11 +501,10 @@ checked_intel_timeline_create(struct drm_i915_private *i915) static int live_hwsp_engine(void *arg) { #define NUM_TIMELINES 4096 - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_timeline **timelines; struct intel_engine_cs *engine; enum intel_engine_id id; - intel_wakeref_t wakeref; unsigned long count, n; int err = 0; @@ -515,37 +519,40 @@ static int live_hwsp_engine(void *arg) if (!timelines) return -ENOMEM; - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - count = 0; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { if (!intel_engine_can_store_dword(engine)) continue; + intel_engine_pm_get(engine); + for (n = 0; n < NUM_TIMELINES; n++) { struct intel_timeline *tl; struct i915_request *rq; - tl = checked_intel_timeline_create(i915); + tl = checked_intel_timeline_create(gt); if (IS_ERR(tl)) { err = PTR_ERR(tl); - goto out; + break; } rq = tl_write(tl, engine, count); if (IS_ERR(rq)) { intel_timeline_put(tl); err = PTR_ERR(rq); - goto out; + break; } timelines[count++] = tl; + i915_request_put(rq); } + + intel_engine_pm_put(engine); + if (err) + break; } -out: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(gt->i915)) err = -EIO; for (n = 0; n < count; n++) { @@ -559,11 +566,7 @@ out: intel_timeline_put(tl); } - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - kvfree(timelines); - return err; #undef NUM_TIMELINES } @@ -571,11 +574,10 @@ out: static int live_hwsp_alternate(void *arg) { #define NUM_TIMELINES 4096 - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_timeline **timelines; struct intel_engine_cs *engine; enum intel_engine_id id; - intel_wakeref_t wakeref; unsigned long count, n; int err = 0; @@ -591,25 +593,25 @@ static int live_hwsp_alternate(void *arg) if (!timelines) return -ENOMEM; - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - count = 0; for (n = 0; n < NUM_TIMELINES; 
n++) { - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct intel_timeline *tl; struct i915_request *rq; if (!intel_engine_can_store_dword(engine)) continue; - tl = checked_intel_timeline_create(i915); + tl = checked_intel_timeline_create(gt); if (IS_ERR(tl)) { + intel_engine_pm_put(engine); err = PTR_ERR(tl); goto out; } + intel_engine_pm_get(engine); rq = tl_write(tl, engine, count); + intel_engine_pm_put(engine); if (IS_ERR(rq)) { intel_timeline_put(tl); err = PTR_ERR(rq); @@ -617,11 +619,12 @@ static int live_hwsp_alternate(void *arg) } timelines[count++] = tl; + i915_request_put(rq); } } out: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(gt->i915)) err = -EIO; for (n = 0; n < count; n++) { @@ -635,22 +638,17 @@ out: intel_timeline_put(tl); } - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - kvfree(timelines); - return err; #undef NUM_TIMELINES } static int live_hwsp_wrap(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_engine_cs *engine; struct intel_timeline *tl; enum intel_engine_id id; - intel_wakeref_t wakeref; int err = 0; /* @@ -658,14 +656,10 @@ static int live_hwsp_wrap(void *arg) * foreign GPU references. */ - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + tl = intel_timeline_create(gt, NULL); + if (IS_ERR(tl)) + return PTR_ERR(tl); - tl = intel_timeline_create(&i915->gt, NULL); - if (IS_ERR(tl)) { - err = PTR_ERR(tl); - goto out_rpm; - } if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline) goto out_free; @@ -673,7 +667,7 @@ static int live_hwsp_wrap(void *arg) if (err) goto out_free; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { const u32 *hwsp_seqno[2]; struct i915_request *rq; u32 seqno[2]; @@ -681,7 +675,9 @@ static int live_hwsp_wrap(void *arg) if (!intel_engine_can_store_dword(engine)) continue; + intel_engine_pm_get(engine); rq = i915_request_create(engine->kernel_context); + intel_engine_pm_put(engine); if (IS_ERR(rq)) { err = PTR_ERR(rq); goto out; @@ -743,29 +739,24 @@ static int live_hwsp_wrap(void *arg) goto out; } - i915_retire_requests(i915); /* recycle HWSP */ + intel_gt_retire_requests(gt); /* recycle HWSP */ } out: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(gt->i915)) err = -EIO; intel_timeline_unpin(tl); out_free: intel_timeline_put(tl); -out_rpm: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; } static int live_hwsp_recycle(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_engine_cs *engine; enum intel_engine_id id; - intel_wakeref_t wakeref; unsigned long count; int err = 0; @@ -775,38 +766,38 @@ static int live_hwsp_recycle(void *arg) * want to confuse ourselves or the GPU. 
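With tl_write() now returning a referenced request, the engine/alternate/recycle flavours above all reduce to the same write-wait-readback core; a minimal sketch of it, using only helpers from this file (hypothetical wrapper name):

/* Sketch: write a seqno through the GPU, then read it back via the CPU
 * mapping of the timeline's status page (condensed from live_hwsp_recycle). */
static int check_hwsp_value_sketch(struct intel_timeline *tl,
				   struct intel_engine_cs *engine,
				   u32 value)
{
	struct i915_request *rq;
	int err = 0;

	rq = tl_write(tl, engine, value);	/* emit_ggtt_store_dw() into the HWSP */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	if (i915_request_wait(rq, 0, HZ / 5) < 0)
		err = -EIO;
	else if (*tl->hwsp_seqno != value)	/* CPU-visible readback must match */
		err = -EINVAL;

	i915_request_put(rq);			/* drop the reference tl_write() handed back */
	return err;
}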
*/ - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - count = 0; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { IGT_TIMEOUT(end_time); if (!intel_engine_can_store_dword(engine)) continue; + intel_engine_pm_get(engine); + do { struct intel_timeline *tl; struct i915_request *rq; - tl = checked_intel_timeline_create(i915); + tl = checked_intel_timeline_create(gt); if (IS_ERR(tl)) { err = PTR_ERR(tl); - goto out; + break; } rq = tl_write(tl, engine, count); if (IS_ERR(rq)) { intel_timeline_put(tl); err = PTR_ERR(rq); - goto out; + break; } if (i915_request_wait(rq, 0, HZ / 5) < 0) { pr_err("Wait for timeline writes timed out!\n"); + i915_request_put(rq); intel_timeline_put(tl); err = -EIO; - goto out; + break; } if (*tl->hwsp_seqno != count) { @@ -815,17 +806,18 @@ static int live_hwsp_recycle(void *arg) err = -EINVAL; } + i915_request_put(rq); intel_timeline_put(tl); count++; if (err) - goto out; + break; } while (!__igt_timeout(end_time, NULL)); - } -out: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); + intel_engine_pm_put(engine); + if (err) + break; + } return err; } @@ -842,5 +834,5 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915) if (intel_gt_is_wedged(&i915->gt)) return 0; - return i915_live_subtests(tests, i915); + return intel_gt_live_subtests(tests, &i915->gt); } diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index d06d68ac2a3b..abce6e4ec9c0 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -33,8 +33,32 @@ struct wa_lists { } engine[I915_NUM_ENGINES]; }; +static int request_add_sync(struct i915_request *rq, int err) +{ + i915_request_get(rq); + i915_request_add(rq); + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -EIO; + i915_request_put(rq); + + return err; +} + +static int request_add_spin(struct i915_request *rq, struct igt_spinner *spin) +{ + int err = 0; + + i915_request_get(rq); + i915_request_add(rq); + if (spin && !igt_wait_for_spinner(spin, rq)) + err = -ETIMEDOUT; + i915_request_put(rq); + + return err; +} + static void -reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists) +reference_lists_init(struct intel_gt *gt, struct wa_lists *lists) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -42,10 +66,10 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists) memset(lists, 0, sizeof(*lists)); wa_init_start(&lists->gt_wa_list, "GT_REF", "global"); - gt_init_workarounds(i915, &lists->gt_wa_list); + gt_init_workarounds(gt->i915, &lists->gt_wa_list); wa_init_finish(&lists->gt_wa_list); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct i915_wa_list *wal = &lists->engine[id].wa_list; wa_init_start(wal, "REF", engine->name); @@ -59,12 +83,12 @@ reference_lists_init(struct drm_i915_private *i915, struct wa_lists *lists) } static void -reference_lists_fini(struct drm_i915_private *i915, struct wa_lists *lists) +reference_lists_fini(struct intel_gt *gt, struct wa_lists *lists) { struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, i915, id) + for_each_engine(engine, gt, id) intel_wa_list_free(&lists->engine[id].wa_list); intel_wa_list_free(&lists->gt_wa_list); @@ -191,10 +215,10 @@ static int check_whitelist(struct i915_gem_context *ctx, err = 0; i915_gem_object_lock(results); - 
intel_wedge_on_timeout(&wedge, &ctx->i915->gt, HZ / 5) /* safety net! */ + intel_wedge_on_timeout(&wedge, engine->gt, HZ / 5) /* safety net! */ err = i915_gem_object_set_to_cpu_domain(results, false); i915_gem_object_unlock(results); - if (intel_gt_is_wedged(&ctx->i915->gt)) + if (intel_gt_is_wedged(engine->gt)) err = -EIO; if (err) goto out_put; @@ -243,7 +267,6 @@ switch_to_scratch_context(struct intel_engine_cs *engine, struct i915_gem_context *ctx; struct intel_context *ce; struct i915_request *rq; - intel_wakeref_t wakeref; int err = 0; ctx = kernel_context(engine->i915); @@ -255,12 +278,9 @@ switch_to_scratch_context(struct intel_engine_cs *engine, ce = i915_gem_context_get_engine(ctx, engine->legacy_idx); GEM_BUG_ON(IS_ERR(ce)); - rq = ERR_PTR(-ENODEV); - with_intel_runtime_pm(&engine->i915->runtime_pm, wakeref) - rq = igt_spinner_create_request(spin, ce, MI_NOOP); + rq = igt_spinner_create_request(spin, ce, MI_NOOP); intel_context_put(ce); - kernel_context_close(ctx); if (IS_ERR(rq)) { spin = NULL; @@ -268,17 +288,12 @@ switch_to_scratch_context(struct intel_engine_cs *engine, goto err; } - i915_request_add(rq); - - if (spin && !igt_wait_for_spinner(spin, rq)) { - pr_err("Spinner failed to start\n"); - err = -ETIMEDOUT; - } - + err = request_add_spin(rq, spin); err: if (err && spin) igt_spinner_end(spin); + kernel_context_close(ctx); return err; } @@ -313,7 +328,7 @@ static int check_whitelist_across_reset(struct intel_engine_cs *engine, if (err) goto out_spin; - with_intel_runtime_pm(&i915->runtime_pm, wakeref) + with_intel_runtime_pm(engine->uncore->rpm, wakeref) err = reset(engine); igt_spinner_end(&spin); @@ -355,6 +370,7 @@ out_ctx: static struct i915_vma *create_batch(struct i915_gem_context *ctx) { struct drm_i915_gem_object *obj; + struct i915_address_space *vm; struct i915_vma *vma; int err; @@ -362,7 +378,9 @@ static struct i915_vma *create_batch(struct i915_gem_context *ctx) if (IS_ERR(obj)) return ERR_CAST(obj); - vma = i915_vma_instance(obj, ctx->vm, NULL); + vm = i915_gem_context_get_vm_rcu(ctx); + vma = i915_vma_instance(obj, vm, NULL); + i915_vm_put(vm); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err_obj; @@ -463,12 +481,15 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, 0xffff00ff, 0xffffffff, }; + struct i915_address_space *vm; struct i915_vma *scratch; struct i915_vma *batch; int err = 0, i, v; u32 *cs, *results; - scratch = create_scratch(ctx->vm, 2 * ARRAY_SIZE(values) + 1); + vm = i915_gem_context_get_vm_rcu(ctx); + scratch = create_scratch(vm, 2 * ARRAY_SIZE(values) + 1); + i915_vm_put(vm); if (IS_ERR(scratch)) return PTR_ERR(scratch); @@ -492,6 +513,9 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, ro_reg = ro_register(reg); + /* Clear non priv flags */ + reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK; + srm = MI_STORE_REGISTER_MEM; lrm = MI_LOAD_REGISTER_MEM; if (INTEL_GEN(ctx->i915) >= 8) @@ -565,6 +589,14 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, goto err_request; } + i915_vma_lock(batch); + err = i915_request_await_object(rq, batch->obj, false); + if (err == 0) + err = i915_vma_move_to_active(batch, rq, 0); + i915_vma_unlock(batch); + if (err) + goto err_request; + err = engine->emit_bb_start(rq, batch->node.start, PAGE_SIZE, 0); @@ -572,15 +604,11 @@ static int check_dirty_whitelist(struct i915_gem_context *ctx, goto err_request; err_request: - i915_request_add(rq); - if (err) - goto out_batch; - - if (i915_request_wait(rq, 0, HZ / 5) < 0) { + err = request_add_sync(rq, err); + if (err) { 
pr_err("%s: Futzing %x timedout; cancelling test\n", engine->name, reg); - intel_gt_set_wedged(&ctx->i915->gt); - err = -EIO; + intel_gt_set_wedged(engine->gt); goto out_batch; } @@ -668,7 +696,7 @@ out_unpin: break; } - if (igt_flush_test(ctx->i915, I915_WAIT_LOCKED)) + if (igt_flush_test(ctx->i915)) err = -EIO; out_batch: i915_vma_unpin_and_release(&batch, 0); @@ -679,36 +707,29 @@ out_scratch: static int live_dirty_whitelist(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_engine_cs *engine; struct i915_gem_context *ctx; enum intel_engine_id id; - intel_wakeref_t wakeref; struct drm_file *file; int err = 0; /* Can the user write to the whitelisted registers? */ - if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */ + if (INTEL_GEN(gt->i915) < 7) /* minimum requirement for LRI, SRM, LRM */ return 0; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + file = mock_file(gt->i915); + if (IS_ERR(file)) + return PTR_ERR(file); - mutex_unlock(&i915->drm.struct_mutex); - file = mock_file(i915); - mutex_lock(&i915->drm.struct_mutex); - if (IS_ERR(file)) { - err = PTR_ERR(file); - goto out_rpm; - } - - ctx = live_context(i915, file); + ctx = live_context(gt->i915, file); if (IS_ERR(ctx)) { err = PTR_ERR(ctx); goto out_file; } - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { if (engine->whitelist.count == 0) continue; @@ -718,45 +739,43 @@ static int live_dirty_whitelist(void *arg) } out_file: - mutex_unlock(&i915->drm.struct_mutex); - mock_file_free(i915, file); - mutex_lock(&i915->drm.struct_mutex); -out_rpm: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + mock_file_free(gt->i915, file); return err; } static int live_reset_whitelist(void *arg) { - struct drm_i915_private *i915 = arg; - struct intel_engine_cs *engine = i915->engine[RCS0]; + struct intel_gt *gt = arg; + struct intel_engine_cs *engine; + enum intel_engine_id id; int err = 0; /* If we reset the gpu, we should not lose the RING_NONPRIV */ + igt_global_reset_lock(gt); - if (!engine || engine->whitelist.count == 0) - return 0; - - igt_global_reset_lock(&i915->gt); + for_each_engine(engine, gt, id) { + if (engine->whitelist.count == 0) + continue; - if (intel_has_reset_engine(i915)) { - err = check_whitelist_across_reset(engine, - do_engine_reset, - "engine"); - if (err) - goto out; - } + if (intel_has_reset_engine(gt)) { + err = check_whitelist_across_reset(engine, + do_engine_reset, + "engine"); + if (err) + goto out; + } - if (intel_has_gpu_reset(i915)) { - err = check_whitelist_across_reset(engine, - do_device_reset, - "device"); - if (err) - goto out; + if (intel_has_gpu_reset(gt)) { + err = check_whitelist_across_reset(engine, + do_device_reset, + "device"); + if (err) + goto out; + } } out: - igt_global_reset_unlock(&i915->gt); + igt_global_reset_unlock(gt); return err; } @@ -772,6 +791,14 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx, if (IS_ERR(rq)) return PTR_ERR(rq); + i915_vma_lock(results); + err = i915_request_await_object(rq, results->obj, true); + if (err == 0) + err = i915_vma_move_to_active(results, rq, EXEC_OBJECT_WRITE); + i915_vma_unlock(results); + if (err) + goto err_req; + srm = MI_STORE_REGISTER_MEM; if (INTEL_GEN(ctx->i915) >= 8) srm++; @@ -786,8 +813,8 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx, u64 offset = results->node.start + sizeof(u32) * i; u32 reg = i915_mmio_reg_offset(engine->whitelist.list[i].reg); - /* Clear access permission field */ - reg &= 
~RING_FORCE_TO_NONPRIV_ACCESS_MASK; + /* Clear non priv flags */ + reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK; *cs++ = srm; *cs++ = reg; @@ -797,12 +824,7 @@ static int read_whitelisted_registers(struct i915_gem_context *ctx, intel_ring_advance(rq, cs); err_req: - i915_request_add(rq); - - if (i915_request_wait(rq, 0, HZ / 5) < 0) - err = -EIO; - - return err; + return request_add_sync(rq, err); } static int scrub_whitelisted_registers(struct i915_gem_context *ctx, @@ -830,6 +852,9 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx, if (ro_register(reg)) continue; + /* Clear non priv flags */ + reg &= RING_FORCE_TO_NONPRIV_ADDRESS_MASK; + *cs++ = reg; *cs++ = 0xffffffff; } @@ -850,13 +875,19 @@ static int scrub_whitelisted_registers(struct i915_gem_context *ctx, goto err_request; } + i915_vma_lock(batch); + err = i915_request_await_object(rq, batch->obj, false); + if (err == 0) + err = i915_vma_move_to_active(batch, rq, 0); + i915_vma_unlock(batch); + if (err) + goto err_request; + /* Perform the writes from an unprivileged "user" batch */ err = engine->emit_bb_start(rq, batch->node.start, 0, 0); err_request: - i915_request_add(rq); - if (i915_request_wait(rq, 0, HZ / 5) < 0) - err = -EIO; + err = request_add_sync(rq, err); err_unpin: i915_gem_object_unpin_map(batch->obj); @@ -973,7 +1004,7 @@ err_a: static int live_isolated_whitelist(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct { struct i915_gem_context *ctx; struct i915_vma *scratch[2]; @@ -987,40 +1018,46 @@ static int live_isolated_whitelist(void *arg) * invisible to a second context. */ - if (!intel_engines_has_context_isolation(i915)) - return 0; - - if (!i915->kernel_context->vm) + if (!intel_engines_has_context_isolation(gt->i915)) return 0; for (i = 0; i < ARRAY_SIZE(client); i++) { + struct i915_address_space *vm; struct i915_gem_context *c; - c = kernel_context(i915); + c = kernel_context(gt->i915); if (IS_ERR(c)) { err = PTR_ERR(c); goto err; } - client[i].scratch[0] = create_scratch(c->vm, 1024); + vm = i915_gem_context_get_vm_rcu(c); + + client[i].scratch[0] = create_scratch(vm, 1024); if (IS_ERR(client[i].scratch[0])) { err = PTR_ERR(client[i].scratch[0]); + i915_vm_put(vm); kernel_context_close(c); goto err; } - client[i].scratch[1] = create_scratch(c->vm, 1024); + client[i].scratch[1] = create_scratch(vm, 1024); if (IS_ERR(client[i].scratch[1])) { err = PTR_ERR(client[i].scratch[1]); i915_vma_unpin_and_release(&client[i].scratch[0], 0); + i915_vm_put(vm); kernel_context_close(c); goto err; } client[i].ctx = c; + i915_vm_put(vm); } - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { + if (!engine->kernel_context->vm) + continue; + if (!whitelist_writable_count(engine)) continue; @@ -1074,7 +1111,7 @@ err: kernel_context_close(client[i].ctx); } - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(gt->i915)) err = -EIO; return err; @@ -1109,16 +1146,16 @@ verify_wa_lists(struct i915_gem_context *ctx, struct wa_lists *lists, static int live_gpu_reset_workarounds(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct i915_gem_context *ctx; intel_wakeref_t wakeref; struct wa_lists lists; bool ok; - if (!intel_has_gpu_reset(i915)) + if (!intel_has_gpu_reset(gt)) return 0; - ctx = kernel_context(i915); + ctx = kernel_context(gt->i915); if (IS_ERR(ctx)) return PTR_ERR(ctx); @@ -1126,25 +1163,25 @@ live_gpu_reset_workarounds(void *arg) pr_info("Verifying after GPU reset...\n"); - 
igt_global_reset_lock(&i915->gt); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + igt_global_reset_lock(gt); + wakeref = intel_runtime_pm_get(gt->uncore->rpm); - reference_lists_init(i915, &lists); + reference_lists_init(gt, &lists); ok = verify_wa_lists(ctx, &lists, "before reset"); if (!ok) goto out; - intel_gt_reset(&i915->gt, ALL_ENGINES, "live_workarounds"); + intel_gt_reset(gt, ALL_ENGINES, "live_workarounds"); ok = verify_wa_lists(ctx, &lists, "after reset"); out: i915_gem_context_unlock_engines(ctx); kernel_context_close(ctx); - reference_lists_fini(i915, &lists); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - igt_global_reset_unlock(&i915->gt); + reference_lists_fini(gt, &lists); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + igt_global_reset_unlock(gt); return ok ? 0 : -ESRCH; } @@ -1152,7 +1189,7 @@ out: static int live_engine_reset_workarounds(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct i915_gem_engines_iter it; struct i915_gem_context *ctx; struct intel_context *ce; @@ -1162,17 +1199,17 @@ live_engine_reset_workarounds(void *arg) struct wa_lists lists; int ret = 0; - if (!intel_has_reset_engine(i915)) + if (!intel_has_reset_engine(gt)) return 0; - ctx = kernel_context(i915); + ctx = kernel_context(gt->i915); if (IS_ERR(ctx)) return PTR_ERR(ctx); - igt_global_reset_lock(&i915->gt); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + igt_global_reset_lock(gt); + wakeref = intel_runtime_pm_get(gt->uncore->rpm); - reference_lists_init(i915, &lists); + reference_lists_init(gt, &lists); for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { struct intel_engine_cs *engine = ce->engine; @@ -1205,12 +1242,10 @@ live_engine_reset_workarounds(void *arg) goto err; } - i915_request_add(rq); - - if (!igt_wait_for_spinner(&spin, rq)) { + ret = request_add_spin(rq, &spin); + if (ret) { pr_err("Spinner failed to start\n"); igt_spinner_fini(&spin); - ret = -ETIMEDOUT; goto err; } @@ -1227,12 +1262,12 @@ live_engine_reset_workarounds(void *arg) } err: i915_gem_context_unlock_engines(ctx); - reference_lists_fini(i915, &lists); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - igt_global_reset_unlock(&i915->gt); + reference_lists_fini(gt, &lists); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); + igt_global_reset_unlock(gt); kernel_context_close(ctx); - igt_flush_test(i915, I915_WAIT_LOCKED); + igt_flush_test(gt->i915); return ret; } @@ -1246,14 +1281,9 @@ int intel_workarounds_live_selftests(struct drm_i915_private *i915) SUBTEST(live_gpu_reset_workarounds), SUBTEST(live_engine_reset_workarounds), }; - int err; if (intel_gt_is_wedged(&i915->gt)) return 0; - mutex_lock(&i915->drm.struct_mutex); - err = i915_subtests(tests, i915); - mutex_unlock(&i915->drm.struct_mutex); - - return err; + return intel_gt_live_subtests(tests, &i915->gt); } diff --git a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c index 598170efcaf6..2a77c051f36a 100644 --- a/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftests/mock_timeline.c @@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context) mutex_init(&timeline->mutex); - INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex); + INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex); INIT_LIST_HEAD(&timeline->requests); i915_syncmap_init(&timeline->sync); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 
249c747e9756..3ee4a4e7689d 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -4,11 +4,34 @@ */ #include "gt/intel_gt.h" +#include "gt/intel_gt_irq.h" +#include "gt/intel_gt_pm_irq.h" #include "intel_guc.h" #include "intel_guc_ads.h" #include "intel_guc_submission.h" #include "i915_drv.h" +/** + * DOC: GuC + * + * The GuC is a microcontroller inside the GT HW, introduced in gen9. The GuC is + * designed to offload some of the functionality usually performed by the host + * driver; currently the main operations it can take care of are: + * + * - Authentication of the HuC, which is required to fully enable HuC usage. + * - Low latency graphics context scheduling (a.k.a. GuC submission). + * - GT Power management. + * + * The enable_guc module parameter can be used to select which of those + * operations to enable within GuC. Note that not all the operations are + * supported on all gen9+ platforms. + * + * Enabling the GuC is not mandatory and therefore the firmware is only loaded + * if at least one of the operations is selected. However, not loading the GuC + * might result in the loss of some features that do require the GuC (currently + * just the HuC, but more are expected to land in the future). + */ + static void gen8_guc_raise_irq(struct intel_guc *guc) { struct intel_gt *gt = guc_to_gt(guc); @@ -56,6 +79,93 @@ void intel_guc_init_send_regs(struct intel_guc *guc) guc->send_regs.fw_domains = fw_domains; } +static void gen9_reset_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + assert_rpm_wakelock_held(&gt->i915->runtime_pm); + + spin_lock_irq(&gt->irq_lock); + gen6_gt_pm_reset_iir(gt, gt->pm_guc_events); + spin_unlock_irq(&gt->irq_lock); +} + +static void gen9_enable_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + assert_rpm_wakelock_held(&gt->i915->runtime_pm); + + spin_lock_irq(&gt->irq_lock); + if (!guc->interrupts.enabled) { + WARN_ON_ONCE(intel_uncore_read(gt->uncore, GEN8_GT_IIR(2)) & + gt->pm_guc_events); + guc->interrupts.enabled = true; + gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); + } + spin_unlock_irq(&gt->irq_lock); +} + +static void gen9_disable_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + assert_rpm_wakelock_held(&gt->i915->runtime_pm); + + spin_lock_irq(&gt->irq_lock); + guc->interrupts.enabled = false; + + gen6_gt_pm_disable_irq(gt, gt->pm_guc_events); + + spin_unlock_irq(&gt->irq_lock); + intel_synchronize_irq(gt->i915); + + gen9_reset_guc_interrupts(guc); +} + +static void gen11_reset_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + spin_lock_irq(&gt->irq_lock); + gen11_gt_reset_one_iir(gt, 0, GEN11_GUC); + spin_unlock_irq(&gt->irq_lock); +} + +static void gen11_enable_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + spin_lock_irq(&gt->irq_lock); + if (!guc->interrupts.enabled) { + u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); + + WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC)); + intel_uncore_write(gt->uncore, + GEN11_GUC_SG_INTR_ENABLE, events); + intel_uncore_write(gt->uncore, + GEN11_GUC_SG_INTR_MASK, ~events); + guc->interrupts.enabled = true; + } + spin_unlock_irq(&gt->irq_lock); +} + +static void gen11_disable_guc_interrupts(struct intel_guc *guc) +{ + struct intel_gt *gt = guc_to_gt(guc); + + spin_lock_irq(&gt->irq_lock); + guc->interrupts.enabled = false; + + intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); +
intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); + + spin_unlock_irq(&gt->irq_lock); + intel_synchronize_irq(gt->i915); + + gen11_reset_guc_interrupts(guc); +} + void intel_guc_init_early(struct intel_guc *guc) { struct drm_i915_private *i915 = guc_to_gt(guc)->i915; @@ -82,32 +192,6 @@ void intel_guc_init_early(struct intel_guc *guc) } } -static int guc_shared_data_create(struct intel_guc *guc) -{ - struct i915_vma *vma; - void *vaddr; - - vma = intel_guc_allocate_vma(guc, PAGE_SIZE); - if (IS_ERR(vma)) - return PTR_ERR(vma); - - vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB); - if (IS_ERR(vaddr)) { - i915_vma_unpin_and_release(&vma, 0); - return PTR_ERR(vaddr); - } - - guc->shared_data = vma; - guc->shared_data_vaddr = vaddr; - - return 0; -} - -static void guc_shared_data_destroy(struct intel_guc *guc) -{ - i915_vma_unpin_and_release(&guc->shared_data, I915_VMA_RELEASE_MAP); -} - static u32 guc_ctl_debug_flags(struct intel_guc *guc) { u32 level = intel_guc_log_get_level(&guc->log); @@ -254,14 +338,9 @@ int intel_guc_init(struct intel_guc *guc) if (ret) goto err_fetch; - ret = guc_shared_data_create(guc); - if (ret) - goto err_fw; - GEM_BUG_ON(!guc->shared_data); - ret = intel_guc_log_create(&guc->log); if (ret) - goto err_shared; + goto err_fw; ret = intel_guc_ads_create(guc); if (ret) @@ -296,8 +375,6 @@ err_ads: intel_guc_ads_destroy(guc); err_log: intel_guc_log_destroy(&guc->log); -err_shared: - guc_shared_data_destroy(guc); err_fw: intel_uc_fw_fini(&guc->fw); err_fetch: @@ -322,7 +399,6 @@ void intel_guc_fini(struct intel_guc *guc) intel_guc_ads_destroy(guc); intel_guc_log_destroy(&guc->log); - guc_shared_data_destroy(guc); intel_uc_fw_fini(&guc->fw); intel_uc_fw_cleanup_fetch(&guc->fw); } @@ -478,6 +554,13 @@ int intel_guc_suspend(struct intel_guc *guc) }; /* + * If GuC communication is enabled but submission is not supported, + * we do not need to suspend the GuC. + */ + if (!intel_guc_submission_is_enabled(guc)) + return 0; + + /* * The ENTER_S_STATE action queues the save/restore operation in GuC FW * and then returns, so waiting on the H2G is not enough to guarantee * GuC is done. When all the processing is done, GuC writes @@ -518,19 +601,9 @@ int intel_guc_suspend(struct intel_guc *guc) int intel_guc_reset_engine(struct intel_guc *guc, struct intel_engine_cs *engine) { - u32 data[7]; - - GEM_BUG_ON(!guc->execbuf_client); - - data[0] = INTEL_GUC_ACTION_REQUEST_ENGINE_RESET; - data[1] = engine->guc_id; - data[2] = 0; - data[3] = 0; - data[4] = 0; - data[5] = guc->execbuf_client->stage_id; - data[6] = intel_guc_ggtt_offset(guc, guc->shared_data); + /* XXX: to be implemented with submission interface rework */ - return intel_guc_send(guc, data, ARRAY_SIZE(data)); + return -ENODEV; } /** @@ -544,13 +617,27 @@ int intel_guc_resume(struct intel_guc *guc) GUC_POWER_D0, }; + /* + * If GuC communication is enabled but submission is not supported, + * we do not need to resume the GuC but we do need to enable the + * GuC communication on resume (above). + */ + if (!intel_guc_submission_is_enabled(guc)) + return 0; + return intel_guc_send(guc, action, ARRAY_SIZE(action)); } /** - * DOC: GuC Address Space + * DOC: GuC Memory Management * - * The layout of GuC address space is shown below: + * GuC can't allocate any memory for its own usage, so all the allocations must + * be handled by the host driver. 
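A minimal host-side allocation sketch for the model this DOC: GuC Memory Management block describes (continued just below), modelled on the guc_shared_data_create() helper removed above; guc_scratch_example() is a hypothetical name used only for illustration:

static int guc_scratch_example(struct intel_guc *guc)
{
	struct i915_vma *vma;
	void *vaddr;

	/* GGTT vma placed in the range the GuC can actually address */
	vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* CPU mapping used by the host to fill the buffer for the GuC */
	vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
	if (IS_ERR(vaddr)) {
		i915_vma_unpin_and_release(&vma, 0);
		return PTR_ERR(vaddr);
	}

	/* ... hand the GGTT offset to the GuC, write through vaddr ... */

	i915_vma_unpin_and_release(&vma, I915_VMA_RELEASE_MAP);
	return 0;
}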
GuC accesses the memory via the GGTT, with the + * exception of the top and bottom parts of the 4GB address space, which are + * instead re-mapped by the GuC HW to memory location of the FW itself (WOPCM) + * or other parts of the HW. The driver must take care not to place objects that + * the GuC is going to access in these reserved ranges. The layout of the GuC + * address space is shown below: * * :: * diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index 2b2f046d3cc3..e6400204a2bd 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -47,8 +47,6 @@ struct intel_guc { struct i915_vma *stage_desc_pool; void *stage_desc_pool_vaddr; struct ida stage_ids; - struct i915_vma *shared_data; - void *shared_data_vaddr; struct intel_guc_client *execbuf_client; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h index 1d3cdd67ca2f..a26a85d50209 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_fwif.h @@ -548,6 +548,7 @@ enum intel_guc_action { INTEL_GUC_ACTION_ALLOCATE_DOORBELL = 0x10, INTEL_GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE = 0x30, + INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x40, INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH = 0x302, INTEL_GUC_ACTION_ENTER_S_STATE = 0x501, INTEL_GUC_ACTION_EXIT_S_STATE = 0x502, @@ -556,7 +557,6 @@ enum intel_guc_action { INTEL_GUC_ACTION_AUTHENTICATE_HUC = 0x4000, INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER = 0x4505, INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER = 0x4506, - INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING = 0x0E000, INTEL_GUC_ACTION_LIMIT }; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index 36332064de9c..caed0d57e704 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -226,7 +226,7 @@ static void guc_read_update_log_buffer(struct intel_guc_log *log) mutex_lock(&log->relay.lock); - if (WARN_ON(!intel_guc_log_relay_enabled(log))) + if (WARN_ON(!intel_guc_log_relay_created(log))) goto out_unlock; /* Get the pointer to shared GuC log buffer */ @@ -361,6 +361,7 @@ void intel_guc_log_init_early(struct intel_guc_log *log) { mutex_init(&log->relay.lock); INIT_WORK(&log->relay.flush_work, capture_logs_work); + log->relay.started = false; } static int guc_log_relay_create(struct intel_guc_log *log) @@ -546,7 +547,7 @@ out_unlock: return ret; } -bool intel_guc_log_relay_enabled(const struct intel_guc_log *log) +bool intel_guc_log_relay_created(const struct intel_guc_log *log) { return log->relay.buf_addr; } @@ -560,7 +561,7 @@ int intel_guc_log_relay_open(struct intel_guc_log *log) mutex_lock(&log->relay.lock); - if (intel_guc_log_relay_enabled(log)) { + if (intel_guc_log_relay_created(log)) { ret = -EEXIST; goto out_unlock; } @@ -585,6 +586,21 @@ int intel_guc_log_relay_open(struct intel_guc_log *log) mutex_unlock(&log->relay.lock); + return 0; + +out_relay: + guc_log_relay_destroy(log); +out_unlock: + mutex_unlock(&log->relay.lock); + + return ret; +} + +int intel_guc_log_relay_start(struct intel_guc_log *log) +{ + if (log->relay.started) + return -EEXIST; + guc_log_enable_flush_events(log); /* @@ -594,47 +610,59 @@ int intel_guc_log_relay_open(struct intel_guc_log *log) */ queue_work(system_highpri_wq, &log->relay.flush_work); - return 0; + log->relay.started = true; -out_relay: - 
guc_log_relay_destroy(log); -out_unlock: - mutex_unlock(&log->relay.lock); - - return ret; + return 0; } void intel_guc_log_relay_flush(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); - struct drm_i915_private *i915 = guc_to_gt(guc)->i915; intel_wakeref_t wakeref; + if (!log->relay.started) + return; + /* * Before initiating the forceful flush, wait for any pending/ongoing * flush to complete otherwise forceful flush may not actually happen. */ flush_work(&log->relay.flush_work); - with_intel_runtime_pm(&i915->runtime_pm, wakeref) + with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref) guc_action_flush_log(guc); /* GuC would have updated log buffer by now, so capture it */ guc_log_capture_logs(log); } -void intel_guc_log_relay_close(struct intel_guc_log *log) +/* + * Stops the relay log. Called from intel_guc_log_relay_close(), so no + * possibility of race with start/flush since relay_write cannot race + * relay_close. + */ +static void guc_log_relay_stop(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *i915 = guc_to_gt(guc)->i915; + if (!log->relay.started) + return; + guc_log_disable_flush_events(log); intel_synchronize_irq(i915); flush_work(&log->relay.flush_work); + log->relay.started = false; +} + +void intel_guc_log_relay_close(struct intel_guc_log *log) +{ + guc_log_relay_stop(log); + mutex_lock(&log->relay.lock); - GEM_BUG_ON(!intel_guc_log_relay_enabled(log)); + GEM_BUG_ON(!intel_guc_log_relay_created(log)); guc_log_unmap(log); guc_log_relay_destroy(log); mutex_unlock(&log->relay.lock); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h index 6f764879acb1..c252c022c5fc 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.h @@ -47,6 +47,7 @@ struct intel_guc_log { struct i915_vma *vma; struct { void *buf_addr; + bool started; struct work_struct flush_work; struct rchan *channel; struct mutex lock; @@ -65,8 +66,9 @@ int intel_guc_log_create(struct intel_guc_log *log); void intel_guc_log_destroy(struct intel_guc_log *log); int intel_guc_log_set_level(struct intel_guc_log *log, u32 level); -bool intel_guc_log_relay_enabled(const struct intel_guc_log *log); +bool intel_guc_log_relay_created(const struct intel_guc_log *log); int intel_guc_log_relay_open(struct intel_guc_log *log); +int intel_guc_log_relay_start(struct intel_guc_log *log); void intel_guc_log_relay_flush(struct intel_guc_log *log); void intel_guc_log_relay_close(struct intel_guc_log *log); diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h index edf194d23c6b..1949346e714e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_reg.h @@ -83,6 +83,9 @@ #define GEN8_GTCR _MMIO(0x4274) #define GEN8_GTCR_INVALIDATE (1<<0) +#define GEN12_GUC_TLB_INV_CR _MMIO(0xcee8) +#define GEN12_GUC_TLB_INV_CR_INVALIDATE (1 << 0) + #define GUC_ARAT_C6DIS _MMIO(0xA178) #define GUC_SHIM_CONTROL _MMIO(0xc064) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index f325d3dd564f..2498c55e0ea5 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -6,12 +6,13 @@ #include <linux/circ_buf.h> #include "gem/i915_gem_context.h" - #include "gt/intel_context.h" #include "gt/intel_engine_pm.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" #include 
"gt/intel_lrc_reg.h" +#include "gt/intel_ring.h" + + #include "intel_guc_submission.h" #include "i915_drv.h" @@ -29,6 +30,12 @@ enum { /** * DOC: GuC-based command submission * + * IMPORTANT NOTE: GuC submission is currently not supported in i915. The GuC + * firmware is moving to an updated submission interface and we plan to + * turn submission back on when that lands. The below documentation (and related + * code) matches the old submission model and will be updated as part of the + * upgrade to the new flow. + * * GuC client: * An intel_guc_client refers to a submission path through GuC. Currently, there * is only one client, which is charged with all submissions to the GuC. This @@ -1004,7 +1011,7 @@ void intel_guc_submission_fini(struct intel_guc *guc) static void guc_interrupts_capture(struct intel_gt *gt) { - struct intel_rps *rps = &gt->i915->gt_pm.rps; + struct intel_rps *rps = &gt->rps; struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; enum intel_engine_id id; @@ -1014,7 +1021,7 @@ static void guc_interrupts_capture(struct intel_gt *gt) * to GuC */ irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, gt->i915, id) + for_each_engine(engine, gt, id) ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); /* route USER_INTERRUPT to Host, all others are sent to GuC. */ @@ -1050,7 +1057,7 @@ static void guc_interrupts_capture(struct intel_gt *gt) static void guc_interrupts_release(struct intel_gt *gt) { - struct intel_rps *rps = &gt->i915->gt_pm.rps; + struct intel_rps *rps = &gt->rps; struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; enum intel_engine_id id; @@ -1062,7 +1069,7 @@ static void guc_interrupts_release(struct intel_gt *gt) */ irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER); irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING); - for_each_engine(engine, gt->i915, id) + for_each_engine(engine, gt, id) ENGINE_WRITE(engine, RING_MODE_GEN7, irqs); /* route all GT interrupts to the host */ @@ -1119,7 +1126,7 @@ int intel_guc_submission_enable(struct intel_guc *guc) enum intel_engine_id id; int err; - err = i915_inject_load_error(gt->i915, -ENXIO); + err = i915_inject_probe_error(gt->i915, -ENXIO); if (err) return err; @@ -1145,7 +1152,7 @@ int intel_guc_submission_enable(struct intel_guc *guc) /* Take over from manual control of ELSP (execlists) */ guc_interrupts_capture(gt); - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { engine->set_default_submission = guc_set_default_submission; engine->set_default_submission(engine); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c index d4625c97b4f9..32a069841c14 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c @@ -9,6 +9,34 @@ #include "intel_huc.h" #include "i915_drv.h" +/** + * DOC: HuC + * + * The HuC is a dedicated microcontroller for usage in media HEVC (High + * Efficiency Video Coding) operations. Userspace can directly use the firmware + * capabilities by adding HuC specific commands to batch buffers. + * + * The kernel driver is only responsible for loading the HuC firmware and + * triggering its security authentication, which is performed by the GuC. For + * the GuC to correctly perform the authentication, the HuC binary must be + * loaded before the GuC one. Loading the HuC is optional; however, not using + * the HuC might negatively impact power usage and/or performance of media + * workloads, depending on the use-cases. 
+ * + * See https://github.com/intel/media-driver for the latest details on HuC + * functionality. + */ + +/** + * DOC: HuC Memory Management + * + * Similarly to the GuC, the HuC can't do any memory allocations on its own, + * with the difference being that the allocations for HuC usage are handled by + * the userspace driver instead of the kernel one. The HuC accesses the memory + * via the PPGTT belonging to the context loaded on the VCS executing the + * HuC-specific commands. + */ + void intel_huc_init_early(struct intel_huc *huc) { struct drm_i915_private *i915 = huc_to_gt(huc)->i915; @@ -35,7 +63,7 @@ static int intel_huc_rsa_data_create(struct intel_huc *huc) void *vaddr; int err; - err = i915_inject_load_error(gt->i915, -ENXIO); + err = i915_inject_probe_error(gt->i915, -ENXIO); if (err) return err; @@ -118,10 +146,9 @@ void intel_huc_fini(struct intel_huc *huc) * * Called after HuC and GuC firmware loading during intel_uc_init_hw(). * - * This function pins HuC firmware image object into GGTT. - * Then it invokes GuC action to authenticate passing the offset to RSA - * signature through intel_guc_auth_huc(). It then waits for 50ms for - * firmware verification ACK and unpins the object. + * This function invokes the GuC action to authenticate the HuC firmware, + * passing the offset of the RSA signature to intel_guc_auth_huc(). It then + * waits for up to 50ms for firmware verification ACK. */ int intel_huc_auth(struct intel_huc *huc) { @@ -134,7 +161,7 @@ int intel_huc_auth(struct intel_huc *huc) if (!intel_uc_fw_is_loaded(&huc->fw)) return -ENOEXEC; - ret = i915_inject_load_error(gt->i915, -ENXIO); + ret = i915_inject_probe_error(gt->i915, -ENXIO); if (ret) goto fail; @@ -185,7 +212,7 @@ int intel_huc_check_status(struct intel_huc *huc) if (!intel_huc_is_supported(huc)) return -ENODEV; - with_intel_runtime_pm(&gt->i915->runtime_pm, wakeref) + with_intel_runtime_pm(gt->uncore->rpm, wakeref) status = intel_uncore_read(gt->uncore, huc->status.reg); return (status & huc->status.mask) == huc->status.value; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c index 74602487ed67..d654340d4d03 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c @@ -8,21 +8,6 @@ #include "i915_drv.h" /** - * DOC: HuC Firmware - * - * Motivation: - * GEN9 introduces a new dedicated firmware for usage in media HEVC (High - * Efficiency Video Coding) operations. Userspace can use the firmware - * capabilities by adding HuC specific commands to batch buffers. - * - * Implementation: - * The same firmware loader is used as the GuC. However, the actual - * loading to HW is deferred until GEM initialization is done. - * - * Note that HuC firmware loading must be done before GuC loading. 
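Userspace typically consumes the result of intel_huc_check_status() through the I915_PARAM_HUC_STATUS getparam; a minimal sketch of that check, assuming libdrm headers and that the parameter is wired to this helper as in mainline (the function name is illustrative only):

#include <xf86drm.h>
#include <i915_drm.h>

/* Returns non-zero once the GuC has authenticated the HuC firmware. */
static int huc_is_authenticated(int drm_fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HUC_STATUS,
		.value = &value,
	};

	if (drmIoctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0; /* treat an ioctl failure as "not authenticated" */

	return value;
}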
- */ - -/** * intel_huc_fw_init_early() - initializes HuC firmware struct * @huc: intel_huc struct * diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c index 71ee7ab035cc..629b19377a29 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c @@ -20,7 +20,7 @@ static int __intel_uc_reset_hw(struct intel_uc *uc) int ret; u32 guc_status; - ret = i915_inject_load_error(gt->i915, -ENXIO); + ret = i915_inject_probe_error(gt->i915, -ENXIO); if (ret) return ret; @@ -197,7 +197,7 @@ static int guc_enable_communication(struct intel_guc *guc) GEM_BUG_ON(guc_communication_enabled(guc)); - ret = i915_inject_load_error(i915, -ENXIO); + ret = i915_inject_probe_error(i915, -ENXIO); if (ret) return ret; @@ -224,17 +224,7 @@ static int guc_enable_communication(struct intel_guc *guc) return 0; } -static void guc_stop_communication(struct intel_guc *guc) -{ - intel_guc_ct_stop(&guc->ct); - - guc->send = intel_guc_send_nop; - guc->handler = intel_guc_to_host_event_handler_nop; - - guc_clear_mmio_msg(guc); -} - -static void guc_disable_communication(struct intel_guc *guc) +static void __guc_stop_communication(struct intel_guc *guc) { /* * Events generated during or after CT disable are logged by guc in @@ -247,6 +237,20 @@ static void guc_disable_communication(struct intel_guc *guc) guc->send = intel_guc_send_nop; guc->handler = intel_guc_to_host_event_handler_nop; +} + +static void guc_stop_communication(struct intel_guc *guc) +{ + intel_guc_ct_stop(&guc->ct); + + __guc_stop_communication(guc); + + DRM_INFO("GuC communication stopped\n"); +} + +static void guc_disable_communication(struct intel_guc *guc) +{ + __guc_stop_communication(guc); intel_guc_ct_disable(&guc->ct); @@ -368,7 +372,7 @@ static int uc_init_wopcm(struct intel_uc *uc) GEM_BUG_ON(!(size & GUC_WOPCM_SIZE_MASK)); GEM_BUG_ON(size & ~GUC_WOPCM_SIZE_MASK); - err = i915_inject_load_error(gt->i915, -ENXIO); + err = i915_inject_probe_error(gt->i915, -ENXIO); if (err) return err; @@ -537,7 +541,9 @@ void intel_uc_fini_hw(struct intel_uc *uc) if (intel_uc_supports_guc_submission(uc)) intel_guc_submission_disable(guc); - guc_disable_communication(guc); + if (guc_communication_enabled(guc)) + guc_disable_communication(guc); + __uc_sanitize(uc); } @@ -581,7 +587,7 @@ void intel_uc_suspend(struct intel_uc *uc) if (!intel_guc_is_running(guc)) return; - with_intel_runtime_pm(&uc_to_gt(uc)->i915->runtime_pm, wakeref) + with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref) intel_uc_runtime_suspend(uc); } diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index bd22bf11adad..66a30ab7044a 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -37,27 +37,34 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw, /* * List of required GuC and HuC binaries per-platform. * Must be ordered based on platform + revid, from newer to older. + * + * TGL 35.2 is interface-compatible with 33.0 for previous Gens. The deltas + * between 33.0 and 35.2 are only related to new additions to support new Gen12 + * features. 
*/ #define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \ - fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 8, 4, 3238)) \ - fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ - fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 03, 01, 2893)) \ - fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 02, 00, 1810)) \ - fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 01, 8, 2893)) \ - fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 01, 07, 1398)) - -#define __MAKE_UC_FW_PATH(prefix_, name_, separator_, major_, minor_, patch_) \ + fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \ + fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \ + fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \ + fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \ + fw_def(COFFEELAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \ + fw_def(GEMINILAKE, 0, guc_def(glk, 33, 0, 0), huc_def(glk, 4, 0, 0)) \ + fw_def(KABYLAKE, 0, guc_def(kbl, 33, 0, 0), huc_def(kbl, 4, 0, 0)) \ + fw_def(BROXTON, 0, guc_def(bxt, 33, 0, 0), huc_def(bxt, 2, 0, 0)) \ + fw_def(SKYLAKE, 0, guc_def(skl, 33, 0, 0), huc_def(skl, 2, 0, 0)) + +#define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \ "i915/" \ __stringify(prefix_) name_ \ - __stringify(major_) separator_ \ - __stringify(minor_) separator_ \ + __stringify(major_) "." \ + __stringify(minor_) "." \ __stringify(patch_) ".bin" #define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \ - __MAKE_UC_FW_PATH(prefix_, "_guc_", ".", major_, minor_, patch_) + __MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_) #define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \ - __MAKE_UC_FW_PATH(prefix_, "_huc_ver", "_", major_, minor_, bld_num_) + __MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_) /* All blobs need to be declared via MODULE_FIRMWARE() */ #define INTEL_UC_MODULE_FW(platform_, revid_, guc_, huc_) \ @@ -218,29 +225,31 @@ static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, { bool user = e == -EINVAL; - if (i915_inject_load_error(i915, e)) { + if (i915_inject_probe_error(i915, e)) { /* non-existing blob */ uc_fw->path = "<invalid>"; uc_fw->user_overridden = user; - } else if (i915_inject_load_error(i915, e)) { + } else if (i915_inject_probe_error(i915, e)) { /* require next major version */ uc_fw->major_ver_wanted += 1; uc_fw->minor_ver_wanted = 0; uc_fw->user_overridden = user; - } else if (i915_inject_load_error(i915, e)) { + } else if (i915_inject_probe_error(i915, e)) { /* require next minor version */ uc_fw->minor_ver_wanted += 1; uc_fw->user_overridden = user; - } else if (uc_fw->major_ver_wanted && i915_inject_load_error(i915, e)) { + } else if (uc_fw->major_ver_wanted && + i915_inject_probe_error(i915, e)) { /* require prev major version */ uc_fw->major_ver_wanted -= 1; uc_fw->minor_ver_wanted = 0; uc_fw->user_overridden = user; - } else if (uc_fw->minor_ver_wanted && i915_inject_load_error(i915, e)) { + } else if (uc_fw->minor_ver_wanted && + i915_inject_probe_error(i915, e)) { /* require prev minor version - hey, this should work! 
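A worked expansion of the path macros above (an illustration derived from the table, not code from the patch); with the "." separator now shared by both blobs, the entries resolve to names such as:

/*
 *   MAKE_GUC_FW_PATH(tgl, 35, 2, 0)  ->  "i915/tgl_guc_35.2.0.bin"
 *   MAKE_HUC_FW_PATH(kbl, 4, 0, 0)   ->  "i915/kbl_huc_4.0.0.bin"
 *
 * The removed "_huc_ver" variant produced names such as
 * "i915/kbl_huc_ver02_00_1810.bin" instead.
 */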
*/ uc_fw->minor_ver_wanted -= 1; uc_fw->user_overridden = user; - } else if (user && i915_inject_load_error(i915, e)) { + } else if (user && i915_inject_probe_error(i915, e)) { /* officially unsupported platform */ uc_fw->major_ver_wanted = 0; uc_fw->minor_ver_wanted = 0; @@ -269,7 +278,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) GEM_BUG_ON(!i915->wopcm.size); GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw)); - err = i915_inject_load_error(i915, -ENXIO); + err = i915_inject_probe_error(i915, -ENXIO); if (err) return err; @@ -337,25 +346,10 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw, struct drm_i915_private *i915) } /* Get version numbers from the CSS header */ - switch (uc_fw->type) { - case INTEL_UC_FW_TYPE_GUC: - uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MAJOR, - css->sw_version); - uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_GUC_MINOR, - css->sw_version); - break; - - case INTEL_UC_FW_TYPE_HUC: - uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MAJOR, - css->sw_version); - uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_HUC_MINOR, - css->sw_version); - break; - - default: - MISSING_CASE(uc_fw->type); - break; - } + uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR, + css->sw_version); + uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR, + css->sw_version); if (uc_fw->major_ver_found != uc_fw->major_ver_wanted || uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) { @@ -400,7 +394,7 @@ static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw, struct i915_ggtt *ggtt) { struct drm_mm_node *node = &ggtt->uc_fw; - GEM_BUG_ON(!node->allocated); + GEM_BUG_ON(!drm_mm_node_allocated(node)); GEM_BUG_ON(upper_32_bits(node->start)); GEM_BUG_ON(upper_32_bits(node->start + node->size - 1)); @@ -445,7 +439,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, struct intel_gt *gt, u64 offset; int ret; - ret = i915_inject_load_error(gt->i915, -ETIMEDOUT); + ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT); if (ret) return ret; @@ -506,7 +500,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, struct intel_gt *gt, /* make sure the status was cleared the last time we reset the uc */ GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw)); - err = i915_inject_load_error(gt->i915, -ENOEXEC); + err = i915_inject_probe_error(gt->i915, -ENOEXEC); if (err) return err; diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h index ae58e8a8c53b..029214cdedd5 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw_abi.h @@ -39,9 +39,6 @@ * 3. Length info of each component can be found in header, in dwords. * 4. Modulus and exponent key are not required by driver. They may not appear * in fw. So driver will load a truncated firmware in this case. - * - * The only difference between GuC and HuC firmwares is how the version - * information is saved. 
*/ struct uc_css_header { @@ -69,11 +66,9 @@ struct uc_css_header { char username[8]; char buildnumber[12]; u32 sw_version; -#define CSS_SW_VERSION_GUC_MAJOR (0xFF << 16) -#define CSS_SW_VERSION_GUC_MINOR (0xFF << 8) -#define CSS_SW_VERSION_GUC_PATCH (0xFF << 0) -#define CSS_SW_VERSION_HUC_MAJOR (0xFFFF << 16) -#define CSS_SW_VERSION_HUC_MINOR (0xFFFF << 0) +#define CSS_SW_VERSION_UC_MAJOR (0xFF << 16) +#define CSS_SW_VERSION_UC_MINOR (0xFF << 8) +#define CSS_SW_VERSION_UC_PATCH (0xFF << 0) u32 reserved[14]; u32 header_info; } __packed; diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c index bba0eafe1cdb..d8a80388bd31 100644 --- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c @@ -108,23 +108,15 @@ static bool client_doorbell_in_sync(struct intel_guc_client *client) * validating that the doorbells status expected by the driver matches what the * GuC/HW have. */ -static int igt_guc_clients(void *args) +static int igt_guc_clients(void *arg) { - struct drm_i915_private *dev_priv = args; + struct intel_gt *gt = arg; + struct intel_guc *guc = &gt->uc.guc; intel_wakeref_t wakeref; - struct intel_guc *guc; int err = 0; - GEM_BUG_ON(!HAS_GT_UC(dev_priv)); - mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - guc = &dev_priv->gt.uc.guc; - if (!guc) { - pr_err("No guc object!\n"); - err = -EINVAL; - goto unlock; - } + GEM_BUG_ON(!HAS_GT_UC(gt->i915)); + wakeref = intel_runtime_pm_get(gt->uncore->rpm); err = check_all_doorbells(guc); if (err) @@ -189,8 +181,7 @@ out: guc_clients_create(guc); guc_clients_enable(guc); unlock: - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - mutex_unlock(&dev_priv->drm.struct_mutex); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); return err; } @@ -201,22 +192,14 @@ unlock: */ static int igt_guc_doorbells(void *arg) { - struct drm_i915_private *dev_priv = arg; + struct intel_gt *gt = arg; + struct intel_guc *guc = &gt->uc.guc; intel_wakeref_t wakeref; - struct intel_guc *guc; int i, err = 0; u16 db_id; - GEM_BUG_ON(!HAS_GT_UC(dev_priv)); - mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - - guc = &dev_priv->gt.uc.guc; - if (!guc) { - pr_err("No guc object!\n"); - err = -EINVAL; - goto unlock; - } + GEM_BUG_ON(!HAS_GT_UC(gt->i915)); + wakeref = intel_runtime_pm_get(gt->uncore->rpm); err = check_all_doorbells(guc); if (err) @@ -298,20 +281,19 @@ out: guc_client_free(clients[i]); } unlock: - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - mutex_unlock(&dev_priv->drm.struct_mutex); + intel_runtime_pm_put(gt->uncore->rpm, wakeref); return err; } -int intel_guc_live_selftest(struct drm_i915_private *dev_priv) +int intel_guc_live_selftest(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { SUBTEST(igt_guc_clients), SUBTEST(igt_guc_doorbells), }; - if (!USES_GUC_SUBMISSION(dev_priv)) + if (!USES_GUC_SUBMISSION(i915)) return 0; - return i915_subtests(tests, dev_priv); + return intel_gt_live_subtests(tests, &i915->gt); } diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 5ff2437b2998..771420453f82 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm) flags = PIN_MAPPABLE; } - mutex_lock(&dev_priv->drm.struct_mutex); + mutex_lock(&dev_priv->ggtt.vm.mutex); mmio_hw_access_pre(dev_priv); 
ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node, size, I915_GTT_PAGE_SIZE, I915_COLOR_UNEVICTABLE, start, end, flags); mmio_hw_access_post(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_unlock(&dev_priv->ggtt.vm.mutex); if (ret) gvt_err("fail to alloc %s gm space from host\n", high_gm ? "high" : "low"); @@ -98,9 +98,9 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu) return 0; out_free_aperture: - mutex_lock(&dev_priv->drm.struct_mutex); + mutex_lock(&dev_priv->ggtt.vm.mutex); drm_mm_remove_node(&vgpu->gm.low_gm_node); - mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_unlock(&dev_priv->ggtt.vm.mutex); return ret; } @@ -108,10 +108,10 @@ static void free_vgpu_gm(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - mutex_lock(&dev_priv->drm.struct_mutex); + mutex_lock(&dev_priv->ggtt.vm.mutex); drm_mm_remove_node(&vgpu->gm.low_gm_node); drm_mm_remove_node(&vgpu->gm.high_gm_node); - mutex_unlock(&dev_priv->drm.struct_mutex); + mutex_unlock(&dev_priv->ggtt.vm.mutex); } /** @@ -198,7 +198,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) mutex_lock(&dev_priv->ggtt.vm.mutex); for (i = 0; i < vgpu_fence_sz(vgpu); i++) { - reg = i915_reserve_fence(dev_priv); + reg = i915_reserve_fence(&dev_priv->ggtt); if (IS_ERR(reg)) goto out_free_fence; diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index e753b1e706e2..6a3ac8cde95d 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -35,7 +35,9 @@ */ #include <linux/slab.h> + #include "i915_drv.h" +#include "gt/intel_ring.h" #include "gvt.h" #include "i915_pvinfo.h" #include "trace.h" diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 4bfaefdf548d..e451298d11c3 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -152,6 +152,7 @@ static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = { static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, struct intel_vgpu_fb_info *info) { + static struct lock_class_key lock_class; struct drm_i915_private *dev_priv = to_i915(dev); struct drm_i915_gem_object *obj; @@ -161,7 +162,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev, drm_gem_private_object_init(dev, &obj->base, roundup(info->size, PAGE_SIZE)); - i915_gem_object_init(obj, &intel_vgpu_gem_ops); + i915_gem_object_init(obj, &intel_vgpu_gem_ops, &lock_class); obj->read_domains = I915_GEM_DOMAIN_GTT; obj->write_domain = 0; diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c index f21b8fb5b37e..d6e7a1189bad 100644 --- a/drivers/gpu/drm/i915/gvt/execlist.c +++ b/drivers/gpu/drm/i915/gvt/execlist.c @@ -534,7 +534,7 @@ static void clean_execlist(struct intel_vgpu *vgpu, struct intel_vgpu_submission *s = &vgpu->submission; intel_engine_mask_t tmp; - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { + for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) { kfree(s->ring_scan_buffer[engine->id]); s->ring_scan_buffer[engine->id] = NULL; s->ring_scan_buffer_size[engine->id] = 0; @@ -548,7 +548,7 @@ static void reset_execlist(struct intel_vgpu *vgpu, struct intel_engine_cs *engine; intel_engine_mask_t tmp; - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) + for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) init_vgpu_execlist(vgpu, engine->id); } diff --git a/drivers/gpu/drm/i915/gvt/handlers.c 
b/drivers/gpu/drm/i915/gvt/handlers.c index 25f78196b964..bd12af349123 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -819,13 +819,16 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu, struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; enum intel_gvt_event_type event; - if (reg == _DPA_AUX_CH_CTL) + if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A))) event = AUX_CHANNEL_A; - else if (reg == _PCH_DPB_AUX_CH_CTL || reg == _DPB_AUX_CH_CTL) + else if (reg == _PCH_DPB_AUX_CH_CTL || + reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_B))) event = AUX_CHANNEL_B; - else if (reg == _PCH_DPC_AUX_CH_CTL || reg == _DPC_AUX_CH_CTL) + else if (reg == _PCH_DPC_AUX_CH_CTL || + reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_C))) event = AUX_CHANNEL_C; - else if (reg == _PCH_DPD_AUX_CH_CTL || reg == _DPD_AUX_CH_CTL) + else if (reg == _PCH_DPD_AUX_CH_CTL || + reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D))) event = AUX_CHANNEL_D; else { WARN_ON(true); @@ -2796,7 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt) MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS); MMIO_D(WM_MISC, D_BDW); - MMIO_D(_MMIO(BDW_EDP_PSR_BASE), D_BDW); + MMIO_D(_MMIO(_SRD_CTL_EDP), D_BDW); MMIO_D(_MMIO(0x6671c), D_BDW_PLUS); MMIO_D(_MMIO(0x66c00), D_BDW_PLUS); @@ -2872,11 +2875,11 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DH(FORCEWAKE_MEDIA_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write); MMIO_DH(FORCEWAKE_ACK_MEDIA_GEN9, D_SKL_PLUS, NULL, NULL); - MMIO_F(_MMIO(_DPB_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + MMIO_F(DP_AUX_CH_CTL(AUX_CH_B), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_F(_MMIO(_DPC_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + MMIO_F(DP_AUX_CH_CTL(AUX_CH_C), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_F(_MMIO(_DPD_AUX_CH_CTL), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, + MMIO_F(DP_AUX_CH_CTL(AUX_CH_D), 6 * 4, 0, 0, 0, D_SKL_PLUS, NULL, dp_aux_ch_ctl_mmio_write); MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS); @@ -3417,6 +3420,10 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, } for (i = 0; i < gvt->mmio.num_mmio_block; i++, block++) { + /* pvinfo data doesn't come from hw mmio */ + if (i915_mmio_reg_offset(block->offset) == VGT_PVINFO_PAGE) + continue; + for (j = 0; j < block->size; j += 4) { ret = handler(gvt, i915_mmio_reg_offset(block->offset) + j, diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 343d79c1cb7e..04a5a0d90823 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1564,27 +1564,10 @@ vgpu_id_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "\n"); } -static ssize_t -hw_id_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct mdev_device *mdev = mdev_from_dev(dev); - - if (mdev) { - struct intel_vgpu *vgpu = (struct intel_vgpu *) - mdev_get_drvdata(mdev); - return sprintf(buf, "%u\n", - vgpu->submission.shadow[0]->gem_context->hw_id); - } - return sprintf(buf, "\n"); -} - static DEVICE_ATTR_RO(vgpu_id); -static DEVICE_ATTR_RO(hw_id); static struct attribute *intel_vgpu_attrs[] = { &dev_attr_vgpu_id.attr, - &dev_attr_hw_id.attr, NULL }; diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index 4208e40445b1..aaf15916d29a 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -35,6 +35,7 @@ #include "i915_drv.h" #include 
"gt/intel_context.h" +#include "gt/intel_ring.h" #include "gvt.h" #include "trace.h" diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 6c79d16b381e..5b2a7d072ec9 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -38,6 +38,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_pm.h" #include "gt/intel_context.h" +#include "gt/intel_ring.h" #include "i915_drv.h" #include "gvt.h" @@ -194,7 +195,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload) return -EFAULT; } - page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); + page = i915_gem_object_get_page(ctx_obj, i); dst = kmap(page); intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst, I915_GTT_PAGE_SIZE); @@ -365,7 +366,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, struct i915_gem_context *ctx) { struct intel_vgpu_mm *mm = workload->shadow_mm; - struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ctx->vm); + struct i915_ppgtt *ppgtt = + i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx)); int i = 0; if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) { @@ -378,6 +380,8 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload, px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i]; } } + + i915_vm_put(&ppgtt->vm); } static int @@ -385,11 +389,8 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct i915_request *rq; - lockdep_assert_held(&dev_priv->drm.struct_mutex); - if (workload->req) return 0; @@ -415,10 +416,9 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; struct intel_vgpu_submission *s = &vgpu->submission; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; int ret; - lockdep_assert_held(&dev_priv->drm.struct_mutex); + lockdep_assert_held(&vgpu->vgpu_lock); if (workload->shadow) return 0; @@ -580,8 +580,6 @@ static void update_vreg_in_ctx(struct intel_vgpu_workload *workload) static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) { - struct intel_vgpu *vgpu = workload->vgpu; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct intel_vgpu_shadow_bb *bb, *pos; if (list_empty(&workload->shadow_bb)) @@ -590,8 +588,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) bb = list_first_entry(&workload->shadow_bb, struct intel_vgpu_shadow_bb, list); - mutex_lock(&dev_priv->drm.struct_mutex); - list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) { if (bb->obj) { if (bb->accessing) @@ -609,8 +605,6 @@ static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload) list_del(&bb->list); kfree(bb); } - - mutex_unlock(&dev_priv->drm.struct_mutex); } static int prepare_workload(struct intel_vgpu_workload *workload) @@ -685,7 +679,6 @@ err_unpin_mm: static int dispatch_workload(struct intel_vgpu_workload *workload) { struct intel_vgpu *vgpu = workload->vgpu; - struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; struct i915_request *rq; int ring_id = workload->ring_id; int ret; @@ -694,7 +687,6 @@ static int dispatch_workload(struct intel_vgpu_workload *workload) ring_id, workload); mutex_lock(&vgpu->vgpu_lock); - mutex_lock(&dev_priv->drm.struct_mutex); ret = intel_gvt_workload_req_alloc(workload); if (ret) @@ -729,7 
+721,6 @@ out: err_req: if (ret) workload->status = ret; - mutex_unlock(&dev_priv->drm.struct_mutex); mutex_unlock(&vgpu->vgpu_lock); return ret; } @@ -844,7 +835,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload) return; } - page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i); + page = i915_gem_object_get_page(ctx_obj, i); src = kmap(page); intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src, I915_GTT_PAGE_SIZE); @@ -887,7 +878,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu, intel_engine_mask_t tmp; /* free the unsubmited workloads in the queues. */ - for_each_engine_masked(engine, dev_priv, engine_mask, tmp) { + for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp) { list_for_each_entry_safe(pos, n, &s->workload_q_head[engine->id], list) { list_del_init(&pos->list); @@ -1233,20 +1224,18 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) struct intel_vgpu_submission *s = &vgpu->submission; struct intel_engine_cs *engine; struct i915_gem_context *ctx; + struct i915_ppgtt *ppgtt; enum intel_engine_id i; int ret; - mutex_lock(&i915->drm.struct_mutex); - ctx = i915_gem_context_create_kernel(i915, I915_PRIORITY_MAX); - if (IS_ERR(ctx)) { - ret = PTR_ERR(ctx); - goto out_unlock; - } + if (IS_ERR(ctx)) + return PTR_ERR(ctx); i915_gem_context_set_force_single_submission(ctx); - i915_context_ppgtt_root_save(s, i915_vm_to_ppgtt(ctx->vm)); + ppgtt = i915_vm_to_ppgtt(i915_gem_context_get_vm_rcu(ctx)); + i915_context_ppgtt_root_save(s, ppgtt); for_each_engine(engine, i915, i) { struct intel_context *ce; @@ -1291,12 +1280,12 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu) atomic_set(&s->running_workload_num, 0); bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES); + i915_vm_put(&ppgtt->vm); i915_gem_context_put(ctx); - mutex_unlock(&i915->drm.struct_mutex); return 0; out_shadow_ctx: - i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(ctx->vm)); + i915_context_ppgtt_root_restore(s, ppgtt); for_each_engine(engine, i915, i) { if (IS_ERR(s->shadow[i])) break; @@ -1304,9 +1293,8 @@ out_shadow_ctx: intel_context_unpin(s->shadow[i]); intel_context_put(s->shadow[i]); } + i915_vm_put(&ppgtt->vm); i915_gem_context_put(ctx); -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); return ret; } @@ -1597,9 +1585,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, */ if (list_empty(workload_q_head(vgpu, ring_id))) { intel_runtime_pm_get(&dev_priv->runtime_pm); - mutex_lock(&dev_priv->drm.struct_mutex); ret = intel_gvt_scan_and_shadow_workload(workload); - mutex_unlock(&dev_priv->drm.struct_mutex); intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 48e16ad93bbd..3c424cb90702 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -7,13 +7,12 @@ #include <linux/debugobjects.h> #include "gt/intel_engine_pm.h" +#include "gt/intel_ring.h" #include "i915_drv.h" #include "i915_active.h" #include "i915_globals.h" -#define BKL(ref) (&(ref)->i915->drm.struct_mutex) - /* * Active refs memory management * @@ -27,35 +26,35 @@ static struct i915_global_active { } global; struct active_node { - struct i915_active_request base; + struct i915_active_fence base; struct i915_active *ref; struct rb_node node; u64 timeline; }; static inline struct active_node * -node_from_active(struct i915_active_request *active) +node_from_active(struct i915_active_fence *active) { return container_of(active, struct 
active_node, base); } #define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers) -static inline bool is_barrier(const struct i915_active_request *active) +static inline bool is_barrier(const struct i915_active_fence *active) { - return IS_ERR(rcu_access_pointer(active->request)); + return IS_ERR(rcu_access_pointer(active->fence)); } static inline struct llist_node *barrier_to_ll(struct active_node *node) { GEM_BUG_ON(!is_barrier(&node->base)); - return (struct llist_node *)&node->base.link; + return (struct llist_node *)&node->base.cb.node; } static inline struct intel_engine_cs * __barrier_to_engine(struct active_node *node) { - return (struct intel_engine_cs *)READ_ONCE(node->base.link.prev); + return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev); } static inline struct intel_engine_cs * @@ -68,7 +67,7 @@ barrier_to_engine(struct active_node *node) static inline struct active_node *barrier_from_ll(struct llist_node *x) { return container_of((struct list_head *)x, - struct active_node, base.link); + struct active_node, base.cb.node); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS) @@ -92,12 +91,17 @@ static void debug_active_init(struct i915_active *ref) static void debug_active_activate(struct i915_active *ref) { - debug_object_activate(ref, &active_debug_desc); + spin_lock_irq(&ref->tree_lock); + if (!atomic_read(&ref->count)) /* before the first inc */ + debug_object_activate(ref, &active_debug_desc); + spin_unlock_irq(&ref->tree_lock); } static void debug_active_deactivate(struct i915_active *ref) { - debug_object_deactivate(ref, &active_debug_desc); + lockdep_assert_held(&ref->tree_lock); + if (!atomic_read(&ref->count)) /* after the last dec */ + debug_object_deactivate(ref, &active_debug_desc); } static void debug_active_fini(struct i915_active *ref) @@ -125,31 +129,46 @@ __active_retire(struct i915_active *ref) { struct active_node *it, *n; struct rb_root root; - bool retire = false; + unsigned long flags; - lockdep_assert_held(&ref->mutex); + GEM_BUG_ON(i915_active_is_idle(ref)); /* return the unused nodes to our slabcache -- flushing the allocator */ - if (atomic_dec_and_test(&ref->count)) { - debug_active_deactivate(ref); - root = ref->tree; - ref->tree = RB_ROOT; - ref->cache = NULL; - retire = true; - } - - mutex_unlock(&ref->mutex); - if (!retire) + if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags)) return; - rbtree_postorder_for_each_entry_safe(it, n, &root, node) { - GEM_BUG_ON(i915_active_request_isset(&it->base)); - kmem_cache_free(global.slab_cache, it); - } + GEM_BUG_ON(rcu_access_pointer(ref->excl.fence)); + debug_active_deactivate(ref); + + root = ref->tree; + ref->tree = RB_ROOT; + ref->cache = NULL; + + spin_unlock_irqrestore(&ref->tree_lock, flags); /* After the final retire, the entire struct may be freed */ if (ref->retire) ref->retire(ref); + + /* ... except if you wait on it, you must manage your own references! 
*/ + wake_up_var(ref); + + rbtree_postorder_for_each_entry_safe(it, n, &root, node) { + GEM_BUG_ON(i915_active_fence_isset(&it->base)); + kmem_cache_free(global.slab_cache, it); + } +} + +static void +active_work(struct work_struct *wrk) +{ + struct i915_active *ref = container_of(wrk, typeof(*ref), work); + + GEM_BUG_ON(!atomic_read(&ref->count)); + if (atomic_add_unless(&ref->count, -1, 1)) + return; + + __active_retire(ref); } static void @@ -159,18 +178,29 @@ active_retire(struct i915_active *ref) if (atomic_add_unless(&ref->count, -1, 1)) return; - /* One active may be flushed from inside the acquire of another */ - mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING); + if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) { + queue_work(system_unbound_wq, &ref->work); + return; + } + __active_retire(ref); } static void -node_retire(struct i915_active_request *base, struct i915_request *rq) +node_retire(struct dma_fence *fence, struct dma_fence_cb *cb) { - active_retire(node_from_active(base)->ref); + i915_active_fence_cb(fence, cb); + active_retire(container_of(cb, struct active_node, base.cb)->ref); } -static struct i915_active_request * +static void +excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb) +{ + i915_active_fence_cb(fence, cb); + active_retire(container_of(cb, struct i915_active, excl.cb)); +} + +static struct i915_active_fence * active_instance(struct i915_active *ref, struct intel_timeline *tl) { struct active_node *node, *prealloc; @@ -193,7 +223,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl) if (!prealloc) return NULL; - mutex_lock(&ref->mutex); + spin_lock_irq(&ref->tree_lock); GEM_BUG_ON(i915_active_is_idle(ref)); parent = NULL; @@ -214,7 +244,7 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl) } node = prealloc; - i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire); + __i915_active_fence_init(&node->base, &tl->mutex, NULL, node_retire); node->ref = ref; node->timeline = idx; @@ -223,29 +253,36 @@ active_instance(struct i915_active *ref, struct intel_timeline *tl) out: ref->cache = node; - mutex_unlock(&ref->mutex); + spin_unlock_irq(&ref->tree_lock); BUILD_BUG_ON(offsetof(typeof(*node), base)); return &node->base; } -void __i915_active_init(struct drm_i915_private *i915, - struct i915_active *ref, +void __i915_active_init(struct i915_active *ref, int (*active)(struct i915_active *ref), void (*retire)(struct i915_active *ref), struct lock_class_key *key) { + unsigned long bits; + debug_active_init(ref); - ref->i915 = i915; ref->flags = 0; ref->active = active; - ref->retire = retire; + ref->retire = ptr_unpack_bits(retire, &bits, 2); + if (bits & I915_ACTIVE_MAY_SLEEP) + ref->flags |= I915_ACTIVE_RETIRE_SLEEPS; + + spin_lock_init(&ref->tree_lock); ref->tree = RB_ROOT; ref->cache = NULL; + init_llist_head(&ref->preallocated_barriers); atomic_set(&ref->count, 0); __mutex_init(&ref->mutex, "i915_active", key); + __i915_active_fence_init(&ref->excl, &ref->mutex, NULL, excl_retire); + INIT_WORK(&ref->work, active_work); } static bool ____active_del_barrier(struct i915_active *ref, @@ -298,9 +335,9 @@ __active_del_barrier(struct i915_active *ref, struct active_node *node) int i915_active_ref(struct i915_active *ref, struct intel_timeline *tl, - struct i915_request *rq) + struct dma_fence *fence) { - struct i915_active_request *active; + struct i915_active_fence *active; int err; lockdep_assert_held(&tl->mutex); @@ -323,26 +360,44 @@ int i915_active_ref(struct i915_active *ref, * request that we want to emit 
on the kernel_context. */ __active_del_barrier(ref, node_from_active(active)); - RCU_INIT_POINTER(active->request, NULL); - INIT_LIST_HEAD(&active->link); - } else { - if (!i915_active_request_isset(active)) - atomic_inc(&ref->count); + RCU_INIT_POINTER(active->fence, NULL); + atomic_dec(&ref->count); } - GEM_BUG_ON(!atomic_read(&ref->count)); - __i915_active_request_set(active, rq); + if (!__i915_active_fence_set(active, fence)) + atomic_inc(&ref->count); out: i915_active_release(ref); return err; } +void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f) +{ + /* We expect the caller to manage the exclusive timeline ordering */ + GEM_BUG_ON(i915_active_is_idle(ref)); + + /* + * As we don't know which mutex the caller is using, we told a small + * lie to the debug code that it is using the i915_active.mutex; + * and now we must stick to that lie. + */ + mutex_acquire(&ref->mutex.dep_map, 0, 0, _THIS_IP_); + if (!__i915_active_fence_set(&ref->excl, f)) + atomic_inc(&ref->count); + mutex_release(&ref->mutex.dep_map, _THIS_IP_); +} + +bool i915_active_acquire_if_busy(struct i915_active *ref) +{ + debug_active_assert(ref); + return atomic_add_unless(&ref->count, 1, 0); +} + int i915_active_acquire(struct i915_active *ref) { int err; - debug_active_assert(ref); - if (atomic_add_unless(&ref->count, 1, 0)) + if (i915_active_acquire_if_busy(ref)) return 0; err = mutex_lock_interruptible(&ref->mutex); @@ -367,109 +422,66 @@ void i915_active_release(struct i915_active *ref) active_retire(ref); } -static void __active_ungrab(struct i915_active *ref) -{ - clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags); -} - -bool i915_active_trygrab(struct i915_active *ref) +static void enable_signaling(struct i915_active_fence *active) { - debug_active_assert(ref); - - if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags)) - return false; + struct dma_fence *fence; - if (!atomic_add_unless(&ref->count, 1, 0)) { - __active_ungrab(ref); - return false; - } + fence = i915_active_fence_get(active); + if (!fence) + return; - return true; -} - -void i915_active_ungrab(struct i915_active *ref) -{ - GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags)); - - active_retire(ref); - __active_ungrab(ref); + dma_fence_enable_sw_signaling(fence); + dma_fence_put(fence); } int i915_active_wait(struct i915_active *ref) { struct active_node *it, *n; - int err; + int err = 0; might_sleep(); - might_lock(&ref->mutex); - if (i915_active_is_idle(ref)) + if (!i915_active_acquire_if_busy(ref)) return 0; - err = mutex_lock_interruptible(&ref->mutex); - if (err) - return err; - - if (!atomic_add_unless(&ref->count, 1, 0)) { - mutex_unlock(&ref->mutex); - return 0; - } - + /* Flush lazy signals */ + enable_signaling(&ref->excl); rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { - if (is_barrier(&it->base)) { /* unconnected idle-barrier */ - err = -EBUSY; - break; - } + if (is_barrier(&it->base)) /* unconnected idle barrier */ + continue; - err = i915_active_request_retire(&it->base, BKL(ref)); - if (err) - break; + enable_signaling(&it->base); } + /* Any fence added after the wait begins will not be auto-signaled */ - __active_retire(ref); + i915_active_release(ref); if (err) return err; - if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE)) + if (wait_var_event_interruptible(ref, i915_active_is_idle(ref))) return -EINTR; - if (!i915_active_is_idle(ref)) - return -EBUSY; - return 0; } -int i915_request_await_active_request(struct i915_request *rq, - struct i915_active_request 
*active) -{ - struct i915_request *barrier = - i915_active_request_raw(active, &rq->i915->drm.struct_mutex); - - return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0; -} - int i915_request_await_active(struct i915_request *rq, struct i915_active *ref) { - struct active_node *it, *n; - int err; - - if (RB_EMPTY_ROOT(&ref->tree)) - return 0; + int err = 0; - /* await allocates and so we need to avoid hitting the shrinker */ - err = i915_active_acquire(ref); - if (err) - return err; + if (rcu_access_pointer(ref->excl.fence)) { + struct dma_fence *fence; - mutex_lock(&ref->mutex); - rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { - err = i915_request_await_active_request(rq, &it->base); - if (err) - break; + rcu_read_lock(); + fence = dma_fence_get_rcu_safe(&ref->excl.fence); + rcu_read_unlock(); + if (fence) { + err = i915_request_await_dma_fence(rq, fence); + dma_fence_put(fence); + } } - mutex_unlock(&ref->mutex); - i915_active_release(ref); + /* In the future we may choose to await on all fences */ + return err; } @@ -477,15 +489,16 @@ int i915_request_await_active(struct i915_request *rq, struct i915_active *ref) void i915_active_fini(struct i915_active *ref) { debug_active_fini(ref); - GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree)); GEM_BUG_ON(atomic_read(&ref->count)); + GEM_BUG_ON(work_pending(&ref->work)); + GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree)); mutex_destroy(&ref->mutex); } #endif static inline bool is_idle_barrier(struct active_node *node, u64 idx) { - return node->timeline == idx && !i915_active_request_isset(&node->base); + return node->timeline == idx && !i915_active_fence_isset(&node->base); } static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) @@ -495,7 +508,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) if (RB_EMPTY_ROOT(&ref->tree)) return NULL; - mutex_lock(&ref->mutex); + spin_lock_irq(&ref->tree_lock); GEM_BUG_ON(i915_active_is_idle(ref)); /* @@ -560,7 +573,7 @@ static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx) goto match; } - mutex_unlock(&ref->mutex); + spin_unlock_irq(&ref->tree_lock); return NULL; @@ -568,7 +581,7 @@ match: rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */ if (p == &ref->cache->node) ref->cache = NULL; - mutex_unlock(&ref->mutex); + spin_unlock_irq(&ref->tree_lock); return rb_entry(p, struct active_node, node); } @@ -576,11 +589,12 @@ match: int i915_active_acquire_preallocate_barrier(struct i915_active *ref, struct intel_engine_cs *engine) { - struct drm_i915_private *i915 = engine->i915; intel_engine_mask_t tmp, mask = engine->mask; + struct intel_gt *gt = engine->gt; struct llist_node *pos, *next; int err; + GEM_BUG_ON(i915_active_is_idle(ref)); GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers)); /* @@ -589,7 +603,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, * We can then use the preallocated nodes in * i915_active_acquire_barrier() */ - for_each_engine_masked(engine, i915, mask, tmp) { + for_each_engine_masked(engine, gt, mask, tmp) { u64 idx = engine->kernel_context->timeline->fence_context; struct active_node *node; @@ -605,13 +619,13 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, node->base.lock = &engine->kernel_context->timeline->mutex; #endif - RCU_INIT_POINTER(node->base.request, NULL); - node->base.retire = node_retire; + RCU_INIT_POINTER(node->base.fence, NULL); + node->base.cb.func = node_retire; node->timeline = idx; node->ref = ref; } - 
if (!i915_active_request_isset(&node->base)) { + if (!i915_active_fence_isset(&node->base)) { /* * Mark this as being *our* unconnected proto-node. * @@ -621,8 +635,8 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, * and then we can use the rb_node and list pointers * for our tracking of the pending barrier. */ - RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN)); - node->base.link.prev = (void *)engine; + RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN)); + node->base.cb.node.prev = (void *)engine; atomic_inc(&ref->count); } @@ -648,6 +662,7 @@ unwind: void i915_active_acquire_barrier(struct i915_active *ref) { struct llist_node *pos, *next; + unsigned long flags; GEM_BUG_ON(i915_active_is_idle(ref)); @@ -657,7 +672,7 @@ void i915_active_acquire_barrier(struct i915_active *ref) * populated by i915_request_add_active_barriers() to point to the * request that will eventually release them. */ - mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING); + spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING); llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) { struct active_node *node = barrier_from_ll(pos); struct intel_engine_cs *engine = barrier_to_engine(node); @@ -679,54 +694,124 @@ void i915_active_acquire_barrier(struct i915_active *ref) rb_link_node(&node->node, parent, p); rb_insert_color(&node->node, &ref->tree); + GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); llist_add(barrier_to_ll(node), &engine->barrier_tasks); intel_engine_pm_put(engine); } - mutex_unlock(&ref->mutex); + spin_unlock_irqrestore(&ref->tree_lock, flags); } void i915_request_add_active_barriers(struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; struct llist_node *node, *next; + unsigned long flags; GEM_BUG_ON(intel_engine_is_virtual(engine)); - GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline); + GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline); + node = llist_del_all(&engine->barrier_tasks); + if (!node) + return; /* * Attach the list of proto-fences to the in-flight request such * that the parent i915_active will be released when this request * is retired. */ - llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) { - RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq); + spin_lock_irqsave(&rq->lock, flags); + llist_for_each_safe(node, next, node) { + RCU_INIT_POINTER(barrier_from_ll(node)->base.fence, &rq->fence); smp_wmb(); /* serialise with reuse_idle_barrier */ - list_add_tail((struct list_head *)node, &rq->active_list); + list_add_tail((struct list_head *)node, &rq->fence.cb_list); } + spin_unlock_irqrestore(&rq->lock, flags); } -int i915_active_request_set(struct i915_active_request *active, - struct i915_request *rq) +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) +#define active_is_held(active) lockdep_is_held((active)->lock) +#else +#define active_is_held(active) true +#endif + +/* + * __i915_active_fence_set: Update the last active fence along its timeline + * @active: the active tracker + * @fence: the new fence (under construction) + * + * Records the new @fence as the last active fence along its timeline in + * this active tracker, moving the tracking callbacks from the previous + * fence onto this one. Returns the previous fence (if not already completed), + * which the caller must ensure is executed before the new fence. To ensure + * that the order of fences within the timeline of the i915_active_fence is + * maintained, it must be locked by the caller. 
+ */ +struct dma_fence * +__i915_active_fence_set(struct i915_active_fence *active, + struct dma_fence *fence) { - int err; + struct dma_fence *prev; + unsigned long flags; + + /* NB: must be serialised by an outer timeline mutex (active->lock) */ + spin_lock_irqsave(fence->lock, flags); + GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)); + + prev = rcu_dereference_protected(active->fence, active_is_held(active)); + if (prev) { + GEM_BUG_ON(prev == fence); + spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING); + __list_del_entry(&active->cb.node); + spin_unlock(prev->lock); /* serialise with prev->cb_list */ + + /* + * active->fence is reset by the callback from inside + * interrupt context. We need to serialise our list + * manipulation with the fence->lock to prevent the prev + * being lost inside an interrupt (it can't be replaced as + * no other caller is allowed to enter __i915_active_fence_set + * as we hold the timeline lock). After serialising with + * the callback, we need to double check which ran first, + * our list_del() [decoupling prev from the callback] or + * the callback... + */ + prev = rcu_access_pointer(active->fence); + } + + rcu_assign_pointer(active->fence, fence); + list_add_tail(&active->cb.node, &fence->cb_list); + + spin_unlock_irqrestore(fence->lock, flags); + + return prev; +} + +int i915_active_fence_set(struct i915_active_fence *active, + struct i915_request *rq) +{ + struct dma_fence *fence; + int err = 0; #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) lockdep_assert_held(active->lock); #endif - /* Must maintain ordering wrt previous active requests */ - err = i915_request_await_active_request(rq, active); - if (err) - return err; + /* Must maintain timeline ordering wrt previous active requests */ + rcu_read_lock(); + fence = __i915_active_fence_set(active, &rq->fence); + if (fence) /* but the previous fence may not belong to that timeline! */ + fence = dma_fence_get_rcu(fence); + rcu_read_unlock(); + if (fence) { + err = i915_request_await_dma_fence(rq, fence); + dma_fence_put(fence); + } - __i915_active_request_set(active, rq); - return 0; + return err; } -void i915_active_retire_noop(struct i915_active_request *active, - struct i915_request *request) +void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb) { - /* Space left intentionally blank */ + i915_active_fence_cb(fence, cb); } #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h index f95058f99057..44859356ce97 100644 --- a/drivers/gpu/drm/i915/i915_active.h +++ b/drivers/gpu/drm/i915/i915_active.h @@ -12,6 +12,10 @@ #include "i915_active_types.h" #include "i915_request.h" +struct i915_request; +struct intel_engine_cs; +struct intel_timeline; + /* * We treat requests as fences. This is not be to confused with our * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync. @@ -28,308 +32,108 @@ * write access so that we can perform concurrent read operations between * the CPU and GPU engines, as well as waiting for all rendering to * complete, or waiting for the last GPU user of a "fence register". The - * object then embeds a #i915_active_request to track the most recent (in + * object then embeds a #i915_active_fence to track the most recent (in * retirement order) request relevant for the desired mode of access. 
- * The #i915_active_request is updated with i915_active_request_set() to + * The #i915_active_fence is updated with i915_active_fence_set() to * track the most recent fence request, typically this is done as part of * i915_vma_move_to_active(). * - * When the #i915_active_request completes (is retired), it will + * When the #i915_active_fence completes (is retired), it will * signal its completion to the owner through a callback as well as mark - * itself as idle (i915_active_request.request == NULL). The owner + * itself as idle (i915_active_fence.request == NULL). The owner * can then perform any action, such as delayed freeing of an active * resource including itself. */ -void i915_active_retire_noop(struct i915_active_request *active, - struct i915_request *request); +void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb); /** - * i915_active_request_init - prepares the activity tracker for use + * __i915_active_fence_init - prepares the activity tracker for use * @active - the active tracker - * @rq - initial request to track, can be NULL + * @fence - initial fence to track, can be NULL * @func - a callback when then the tracker is retired (becomes idle), * can be NULL * - * i915_active_request_init() prepares the embedded @active struct for use as - * an activity tracker, that is for tracking the last known active request - * associated with it. When the last request becomes idle, when it is retired + * i915_active_fence_init() prepares the embedded @active struct for use as + * an activity tracker, that is for tracking the last known active fence + * associated with it. When the last fence becomes idle, when it is retired * after completion, the optional callback @func is invoked. */ static inline void -i915_active_request_init(struct i915_active_request *active, +__i915_active_fence_init(struct i915_active_fence *active, struct mutex *lock, - struct i915_request *rq, - i915_active_retire_fn retire) + void *fence, + dma_fence_func_t fn) { - RCU_INIT_POINTER(active->request, rq); - INIT_LIST_HEAD(&active->link); - active->retire = retire ?: i915_active_retire_noop; + RCU_INIT_POINTER(active->fence, fence); + active->cb.func = fn ?: i915_active_noop; #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) active->lock = lock; #endif } -#define INIT_ACTIVE_REQUEST(name, lock) \ - i915_active_request_init((name), (lock), NULL, NULL) - -/** - * i915_active_request_set - updates the tracker to watch the current request - * @active - the active tracker - * @request - the request to watch - * - * __i915_active_request_set() watches the given @request for completion. Whilst - * that @request is busy, the @active reports busy. When that @request is - * retired, the @active tracker is updated to report idle. 
- */ -static inline void -__i915_active_request_set(struct i915_active_request *active, - struct i915_request *request) -{ -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) - lockdep_assert_held(active->lock); -#endif - list_move(&active->link, &request->active_list); - rcu_assign_pointer(active->request, request); -} +#define INIT_ACTIVE_FENCE(A, LOCK) \ + __i915_active_fence_init((A), (LOCK), NULL, NULL) -int __must_check -i915_active_request_set(struct i915_active_request *active, - struct i915_request *rq); +struct dma_fence * +__i915_active_fence_set(struct i915_active_fence *active, + struct dma_fence *fence); /** - * i915_active_request_raw - return the active request + * i915_active_fence_set - updates the tracker to watch the current fence * @active - the active tracker + * @rq - the request to watch * - * i915_active_request_raw() returns the current request being tracked, or NULL. - * It does not obtain a reference on the request for the caller, so the caller - * must hold struct_mutex. + * i915_active_fence_set() watches the given @rq for completion. While + * that @rq is busy, the @active reports busy. When that @rq is signaled + * (or else retired) the @active tracker is updated to report idle. */ -static inline struct i915_request * -i915_active_request_raw(const struct i915_active_request *active, - struct mutex *mutex) -{ - return rcu_dereference_protected(active->request, - lockdep_is_held(mutex)); -} - -/** - * i915_active_request_peek - report the active request being monitored - * @active - the active tracker - * - * i915_active_request_peek() returns the current request being tracked if - * still active, or NULL. It does not obtain a reference on the request - * for the caller, so the caller must hold struct_mutex. - */ -static inline struct i915_request * -i915_active_request_peek(const struct i915_active_request *active, - struct mutex *mutex) -{ - struct i915_request *request; - - request = i915_active_request_raw(active, mutex); - if (!request || i915_request_completed(request)) - return NULL; - - return request; -} - -/** - * i915_active_request_get - return a reference to the active request - * @active - the active tracker - * - * i915_active_request_get() returns a reference to the active request, or NULL - * if the active tracker is idle. The caller must hold struct_mutex. - */ -static inline struct i915_request * -i915_active_request_get(const struct i915_active_request *active, - struct mutex *mutex) -{ - return i915_request_get(i915_active_request_peek(active, mutex)); -} - -/** - * __i915_active_request_get_rcu - return a reference to the active request - * @active - the active tracker - * - * __i915_active_request_get() returns a reference to the active request, - * or NULL if the active tracker is idle. The caller must hold the RCU read - * lock, but the returned pointer is safe to use outside of RCU. - */ -static inline struct i915_request * -__i915_active_request_get_rcu(const struct i915_active_request *active) -{ - /* - * Performing a lockless retrieval of the active request is super - * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing - * slab of request objects will not be freed whilst we hold the - * RCU read lock. It does not guarantee that the request itself - * will not be freed and then *reused*. 
Viz, - * - * Thread A Thread B - * - * rq = active.request - * retire(rq) -> free(rq); - * (rq is now first on the slab freelist) - * active.request = NULL - * - * rq = new submission on a new object - * ref(rq) - * - * To prevent the request from being reused whilst the caller - * uses it, we take a reference like normal. Whilst acquiring - * the reference we check that it is not in a destroyed state - * (refcnt == 0). That prevents the request being reallocated - * whilst the caller holds on to it. To check that the request - * was not reallocated as we acquired the reference we have to - * check that our request remains the active request across - * the lookup, in the same manner as a seqlock. The visibility - * of the pointer versus the reference counting is controlled - * by using RCU barriers (rcu_dereference and rcu_assign_pointer). - * - * In the middle of all that, we inspect whether the request is - * complete. Retiring is lazy so the request may be completed long - * before the active tracker is updated. Querying whether the - * request is complete is far cheaper (as it involves no locked - * instructions setting cachelines to exclusive) than acquiring - * the reference, so we do it first. The RCU read lock ensures the - * pointer dereference is valid, but does not ensure that the - * seqno nor HWS is the right one! However, if the request was - * reallocated, that means the active tracker's request was complete. - * If the new request is also complete, then both are and we can - * just report the active tracker is idle. If the new request is - * incomplete, then we acquire a reference on it and check that - * it remained the active request. - * - * It is then imperative that we do not zero the request on - * reallocation, so that we can chase the dangling pointers! - * See i915_request_alloc(). - */ - do { - struct i915_request *request; - - request = rcu_dereference(active->request); - if (!request || i915_request_completed(request)) - return NULL; - - /* - * An especially silly compiler could decide to recompute the - * result of i915_request_completed, more specifically - * re-emit the load for request->fence.seqno. A race would catch - * a later seqno value, which could flip the result from true to - * false. Which means part of the instructions below might not - * be executed, while later on instructions are executed. Due to - * barriers within the refcounting the inconsistency can't reach - * past the call to i915_request_get_rcu, but not executing - * that while still executing i915_request_put() creates - * havoc enough. Prevent this with a compiler barrier. - */ - barrier(); - - request = i915_request_get_rcu(request); - - /* - * What stops the following rcu_access_pointer() from occurring - * before the above i915_request_get_rcu()? If we were - * to read the value before pausing to get the reference to - * the request, we may not notice a change in the active - * tracker. - * - * The rcu_access_pointer() is a mere compiler barrier, which - * means both the CPU and compiler are free to perform the - * memory read without constraint. The compiler only has to - * ensure that any operations after the rcu_access_pointer() - * occur afterwards in program order. This means the read may - * be performed earlier by an out-of-order CPU, or adventurous - * compiler. - * - * The atomic operation at the heart of - * i915_request_get_rcu(), see dma_fence_get_rcu(), is - * atomic_inc_not_zero() which is only a full memory barrier - * when successful. 
That is, if i915_request_get_rcu() - * returns the request (and so with the reference counted - * incremented) then the following read for rcu_access_pointer() - * must occur after the atomic operation and so confirm - * that this request is the one currently being tracked. - * - * The corresponding write barrier is part of - * rcu_assign_pointer(). - */ - if (!request || request == rcu_access_pointer(active->request)) - return rcu_pointer_handoff(request); - - i915_request_put(request); - } while (1); -} - +int __must_check +i915_active_fence_set(struct i915_active_fence *active, + struct i915_request *rq); /** - * i915_active_request_get_unlocked - return a reference to the active request + * i915_active_fence_get - return a reference to the active fence * @active - the active tracker * - * i915_active_request_get_unlocked() returns a reference to the active request, + * i915_active_fence_get() returns a reference to the active fence, * or NULL if the active tracker is idle. The reference is obtained under RCU, * so no locking is required by the caller. * - * The reference should be freed with i915_request_put(). + * The reference should be freed with dma_fence_put(). */ -static inline struct i915_request * -i915_active_request_get_unlocked(const struct i915_active_request *active) +static inline struct dma_fence * +i915_active_fence_get(struct i915_active_fence *active) { - struct i915_request *request; + struct dma_fence *fence; rcu_read_lock(); - request = __i915_active_request_get_rcu(active); + fence = dma_fence_get_rcu_safe(&active->fence); rcu_read_unlock(); - return request; + return fence; } /** - * i915_active_request_isset - report whether the active tracker is assigned + * i915_active_fence_isset - report whether the active tracker is assigned * @active - the active tracker * - * i915_active_request_isset() returns true if the active tracker is currently - * assigned to a request. Due to the lazy retiring, that request may be idle + * i915_active_fence_isset() returns true if the active tracker is currently + * assigned to a fence. Due to the lazy retiring, that fence may be idle * and this may report stale information. */ static inline bool -i915_active_request_isset(const struct i915_active_request *active) +i915_active_fence_isset(const struct i915_active_fence *active) { - return rcu_access_pointer(active->request); + return rcu_access_pointer(active->fence); } -/** - * i915_active_request_retire - waits until the request is retired - * @active - the active request on which to wait - * - * i915_active_request_retire() waits until the request is completed, - * and then ensures that at least the retirement handler for this - * @active tracker is called before returning. If the @active - * tracker is idle, the function returns immediately. 
- */ -static inline int __must_check -i915_active_request_retire(struct i915_active_request *active, - struct mutex *mutex) +static inline void +i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb) { - struct i915_request *request; - long ret; - - request = i915_active_request_raw(active, mutex); - if (!request) - return 0; - - ret = i915_request_wait(request, - I915_WAIT_INTERRUPTIBLE, - MAX_SCHEDULE_TIMEOUT); - if (ret < 0) - return ret; + struct i915_active_fence *active = + container_of(cb, typeof(*active), cb); - list_del_init(&active->link); - RCU_INIT_POINTER(active->request, NULL); - - active->retire(active, request); - - return 0; + RCU_INIT_POINTER(active->fence, NULL); } /* @@ -358,34 +162,40 @@ i915_active_request_retire(struct i915_active_request *active, * synchronisation. */ -void __i915_active_init(struct drm_i915_private *i915, - struct i915_active *ref, +void __i915_active_init(struct i915_active *ref, int (*active)(struct i915_active *ref), void (*retire)(struct i915_active *ref), struct lock_class_key *key); -#define i915_active_init(i915, ref, active, retire) do { \ +#define i915_active_init(ref, active, retire) do { \ static struct lock_class_key __key; \ \ - __i915_active_init(i915, ref, active, retire, &__key); \ + __i915_active_init(ref, active, retire, &__key); \ } while (0) int i915_active_ref(struct i915_active *ref, struct intel_timeline *tl, - struct i915_request *rq); + struct dma_fence *fence); + +static inline int +i915_active_add_request(struct i915_active *ref, struct i915_request *rq) +{ + return i915_active_ref(ref, i915_request_timeline(rq), &rq->fence); +} + +void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f); + +static inline bool i915_active_has_exclusive(struct i915_active *ref) +{ + return rcu_access_pointer(ref->excl.fence); +} int i915_active_wait(struct i915_active *ref); -int i915_request_await_active(struct i915_request *rq, - struct i915_active *ref); -int i915_request_await_active_request(struct i915_request *rq, - struct i915_active_request *active); +int i915_request_await_active(struct i915_request *rq, struct i915_active *ref); int i915_active_acquire(struct i915_active *ref); +bool i915_active_acquire_if_busy(struct i915_active *ref); void i915_active_release(struct i915_active *ref); -void __i915_active_release_nested(struct i915_active *ref, int subclass); - -bool i915_active_trygrab(struct i915_active *ref); -void i915_active_ungrab(struct i915_active *ref); static inline bool i915_active_is_idle(const struct i915_active *ref) @@ -404,4 +214,6 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref, void i915_active_acquire_barrier(struct i915_active *ref); void i915_request_add_active_barriers(struct i915_request *rq); +void i915_active_print(struct i915_active *ref, struct drm_printer *m); + #endif /* _I915_ACTIVE_H_ */ diff --git a/drivers/gpu/drm/i915/i915_active_types.h b/drivers/gpu/drm/i915/i915_active_types.h index 1854e7d168c1..96aed0ee700a 100644 --- a/drivers/gpu/drm/i915/i915_active_types.h +++ b/drivers/gpu/drm/i915/i915_active_types.h @@ -8,22 +8,18 @@ #define _I915_ACTIVE_TYPES_H_ #include <linux/atomic.h> +#include <linux/dma-fence.h> #include <linux/llist.h> #include <linux/mutex.h> #include <linux/rbtree.h> #include <linux/rcupdate.h> +#include <linux/workqueue.h> -struct drm_i915_private; -struct i915_active_request; -struct i915_request; +#include "i915_utils.h" -typedef void (*i915_active_retire_fn)(struct i915_active_request *, - struct i915_request *); 
- -struct i915_active_request { - struct i915_request __rcu *request; - struct list_head link; - i915_active_retire_fn retire; +struct i915_active_fence { + struct dma_fence __rcu *fence; + struct dma_fence_cb cb; #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) /* * Incorporeal! @@ -43,20 +39,30 @@ struct i915_active_request { struct active_node; +#define I915_ACTIVE_MAY_SLEEP BIT(0) + +#define __i915_active_call __aligned(4) +#define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2) + struct i915_active { - struct drm_i915_private *i915; + atomic_t count; + struct mutex mutex; + spinlock_t tree_lock; struct active_node *cache; struct rb_root tree; - struct mutex mutex; - atomic_t count; + + /* Preallocated "exclusive" node */ + struct i915_active_fence excl; unsigned long flags; -#define I915_ACTIVE_GRAB_BIT 0 +#define I915_ACTIVE_RETIRE_SLEEPS BIT(0) int (*active)(struct i915_active *ref); void (*retire)(struct i915_active *ref); + struct work_struct work; + struct llist_head preallocated_barriers; }; diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c index fe1871d7c126..e9d4200ce3bc 100644 --- a/drivers/gpu/drm/i915/i915_buddy.c +++ b/drivers/gpu/drm/i915/i915_buddy.c @@ -38,6 +38,7 @@ int __init i915_global_buddy_init(void) if (!global.slab_blocks) return -ENOMEM; + i915_global_register(&global.base); return 0; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index b0f51591f2e4..8016484ebcd3 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -41,7 +41,10 @@ #include "gem/i915_gem_context.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_gt_requests.h" #include "gt/intel_reset.h" +#include "gt/intel_rc6.h" +#include "gt/intel_rps.h" #include "gt/uc/intel_guc_submission.h" #include "i915_debugfs.h" @@ -61,11 +64,18 @@ static int i915_capabilities(struct seq_file *m, void *data) struct drm_i915_private *dev_priv = node_to_i915(m->private); const struct intel_device_info *info = INTEL_INFO(dev_priv); struct drm_printer p = drm_seq_file_printer(m); + const char *msg; seq_printf(m, "gen: %d\n", INTEL_GEN(dev_priv)); seq_printf(m, "platform: %s\n", intel_platform_name(info->platform)); seq_printf(m, "pch: %d\n", INTEL_PCH_TYPE(dev_priv)); + msg = "n/a"; +#ifdef CONFIG_INTEL_IOMMU + msg = enableddisabled(intel_iommu_gfx_mapped); +#endif + seq_printf(m, "iommu: %s\n", msg); + intel_device_info_dump_flags(info, &p); intel_device_info_dump_runtime(RUNTIME_INFO(dev_priv), &p); intel_driver_caps_print(&dev_priv->caps, &p); @@ -77,11 +87,6 @@ static int i915_capabilities(struct seq_file *m, void *data) return 0; } -static char get_pin_flag(struct drm_i915_gem_object *obj) -{ - return obj->pin_global ? 
'p' : ' '; -} - static char get_tiling_flag(struct drm_i915_gem_object *obj) { switch (i915_gem_object_get_tiling(obj)) { @@ -140,9 +145,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) struct i915_vma *vma; int pin_count = 0; - seq_printf(m, "%pK: %c%c%c%c %8zdKiB %02x %02x %s%s%s", + seq_printf(m, "%pK: %c%c%c %8zdKiB %02x %02x %s%s%s", &obj->base, - get_pin_flag(obj), get_tiling_flag(obj), get_global_flag(obj), get_pin_mapped_flag(obj), @@ -221,8 +225,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) seq_printf(m, " (pinned x %d)", pin_count); if (obj->stolen) seq_printf(m, " (stolen: %08llx)", obj->stolen->start); - if (obj->pin_global) - seq_printf(m, " (global)"); + if (i915_gem_object_is_framebuffer(obj)) + seq_printf(m, " (fb)"); engine = i915_gem_object_last_write_engine(obj); if (engine) @@ -243,6 +247,9 @@ static int per_file_stats(int id, void *ptr, void *data) struct file_stats *stats = data; struct i915_vma *vma; + if (!kref_get_unless_zero(&obj->base.refcount)) + return 0; + stats->count++; stats->total += obj->base.size; if (!atomic_read(&obj->bind_count)) @@ -290,6 +297,7 @@ static int per_file_stats(int id, void *ptr, void *data) } spin_unlock(&obj->vma.lock); + i915_gem_object_put(obj); return 0; } @@ -309,34 +317,44 @@ static void print_context_stats(struct seq_file *m, struct drm_i915_private *i915) { struct file_stats kstats = {}; - struct i915_gem_context *ctx; + struct i915_gem_context *ctx, *cn; - list_for_each_entry(ctx, &i915->contexts.list, link) { + spin_lock(&i915->gem.contexts.lock); + list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { struct i915_gem_engines_iter it; struct intel_context *ce; + if (!kref_get_unless_zero(&ctx->ref)) + continue; + + spin_unlock(&i915->gem.contexts.lock); + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { intel_context_lock_pinned(ce); if (intel_context_is_pinned(ce)) { + rcu_read_lock(); if (ce->state) per_file_stats(0, ce->state->obj, &kstats); per_file_stats(0, ce->ring->vma->obj, &kstats); + rcu_read_unlock(); } intel_context_unlock_pinned(ce); } i915_gem_context_unlock_engines(ctx); if (!IS_ERR_OR_NULL(ctx->file_priv)) { - struct file_stats stats = { .vm = ctx->vm, }; + struct file_stats stats = { + .vm = rcu_access_pointer(ctx->vm), + }; struct drm_file *file = ctx->file_priv->file; struct task_struct *task; char name[80]; - spin_lock(&file->table_lock); + rcu_read_lock(); idr_for_each(&file->object_idr, per_file_stats, &stats); - spin_unlock(&file->table_lock); + rcu_read_unlock(); rcu_read_lock(); task = pid_task(ctx->pid ?: file->pid, PIDTYPE_PID); @@ -346,7 +364,12 @@ static void print_context_stats(struct seq_file *m, print_file_stats(m, name, stats); } + + spin_lock(&i915->gem.contexts.lock); + list_safe_reset_next(ctx, cn, link); + i915_gem_context_put(ctx); } + spin_unlock(&i915->gem.contexts.lock); print_file_stats(m, "[k]contexts", kstats); } @@ -354,7 +377,6 @@ static void print_context_stats(struct seq_file *m, static int i915_gem_object_info(struct seq_file *m, void *data) { struct drm_i915_private *i915 = node_to_i915(m->private); - int ret; seq_printf(m, "%u shrinkable [%u free] objects, %llu bytes\n", i915->mm.shrink_count, @@ -363,12 +385,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) seq_putc(m, '\n'); - ret = mutex_lock_interruptible(&i915->drm.struct_mutex); - if (ret) - return ret; - print_context_stats(m, i915); - mutex_unlock(&i915->drm.struct_mutex); return 0; } @@ -376,7 +393,7 @@ static int 
i915_gem_object_info(struct seq_file *m, void *data) static void gen8_display_interrupt_info(struct seq_file *m) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - int pipe; + enum pipe pipe; for_each_pipe(dev_priv, pipe) { enum intel_display_power_domain power_domain; @@ -527,6 +544,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data) gen8_display_interrupt_info(m); } else if (IS_VALLEYVIEW(dev_priv)) { + intel_wakeref_t pref; + seq_printf(m, "Display IER:\t%08x\n", I915_READ(VLV_IER)); seq_printf(m, "Display IIR:\t%08x\n", @@ -537,7 +556,6 @@ static int i915_interrupt_info(struct seq_file *m, void *data) I915_READ(VLV_IMR)); for_each_pipe(dev_priv, pipe) { enum intel_display_power_domain power_domain; - intel_wakeref_t pref; power_domain = POWER_DOMAIN_PIPE(pipe); pref = intel_display_power_get_if_enabled(dev_priv, @@ -571,12 +589,14 @@ static int i915_interrupt_info(struct seq_file *m, void *data) seq_printf(m, "PM IMR:\t\t%08x\n", I915_READ(GEN6_PMIMR)); + pref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT); seq_printf(m, "Port hotplug:\t%08x\n", I915_READ(PORT_HOTPLUG_EN)); seq_printf(m, "DPFLIPSTAT:\t%08x\n", I915_READ(VLV_DPFLIPSTAT)); seq_printf(m, "DPINVGTT:\t%08x\n", I915_READ(DPINVGTT)); + intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, pref); } else if (!HAS_PCH_SPLIT(dev_priv)) { seq_printf(m, "Interrupt enable: %08x\n", @@ -772,7 +792,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); struct intel_uncore *uncore = &dev_priv->uncore; - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = &dev_priv->gt.rps; intel_wakeref_t wakeref; int ret = 0; @@ -808,23 +828,23 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "DDR freq: %d MHz\n", dev_priv->mem_freq); seq_printf(m, "actual GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, (freq_sts >> 8) & 0xff)); + intel_gpu_freq(rps, (freq_sts >> 8) & 0xff)); seq_printf(m, "current GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->cur_freq)); + intel_gpu_freq(rps, rps->cur_freq)); seq_printf(m, "max GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->max_freq)); + intel_gpu_freq(rps, rps->max_freq)); seq_printf(m, "min GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->min_freq)); + intel_gpu_freq(rps, rps->min_freq)); seq_printf(m, "idle GPU freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->idle_freq)); + intel_gpu_freq(rps, rps->idle_freq)); seq_printf(m, "efficient (RPe) frequency: %d MHz\n", - intel_gpu_freq(dev_priv, rps->efficient_freq)); + intel_gpu_freq(rps, rps->efficient_freq)); } else if (INTEL_GEN(dev_priv) >= 6) { u32 rp_state_limits; u32 gt_perf_status; @@ -858,7 +878,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) else reqf >>= 25; } - reqf = intel_gpu_freq(dev_priv, reqf); + reqf = intel_gpu_freq(rps, reqf); rpmodectl = I915_READ(GEN6_RP_CONTROL); rpinclimit = I915_READ(GEN6_RP_UP_THRESHOLD); @@ -871,8 +891,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK; rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK; rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK; - cagf = intel_gpu_freq(dev_priv, - intel_get_cagf(dev_priv, rpstat)); + cagf = intel_gpu_freq(rps, intel_get_cagf(rps, rpstat)); intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); @@ -949,37 +968,37 @@ static int i915_frequency_info(struct seq_file *m, 
void *unused) max_freq *= (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1); seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", - intel_gpu_freq(dev_priv, max_freq)); + intel_gpu_freq(rps, max_freq)); max_freq = (rp_state_cap & 0xff00) >> 8; max_freq *= (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1); seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", - intel_gpu_freq(dev_priv, max_freq)); + intel_gpu_freq(rps, max_freq)); max_freq = (IS_GEN9_LP(dev_priv) ? rp_state_cap >> 16 : rp_state_cap >> 0) & 0xff; max_freq *= (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10 ? GEN9_FREQ_SCALER : 1); seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", - intel_gpu_freq(dev_priv, max_freq)); + intel_gpu_freq(rps, max_freq)); seq_printf(m, "Max overclocked frequency: %dMHz\n", - intel_gpu_freq(dev_priv, rps->max_freq)); + intel_gpu_freq(rps, rps->max_freq)); seq_printf(m, "Current freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->cur_freq)); + intel_gpu_freq(rps, rps->cur_freq)); seq_printf(m, "Actual freq: %d MHz\n", cagf); seq_printf(m, "Idle freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->idle_freq)); + intel_gpu_freq(rps, rps->idle_freq)); seq_printf(m, "Min freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->min_freq)); + intel_gpu_freq(rps, rps->min_freq)); seq_printf(m, "Boost freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->boost_freq)); + intel_gpu_freq(rps, rps->boost_freq)); seq_printf(m, "Max freq: %d MHz\n", - intel_gpu_freq(dev_priv, rps->max_freq)); + intel_gpu_freq(rps, rps->max_freq)); seq_printf(m, "efficient (RPe) frequency: %d MHz\n", - intel_gpu_freq(dev_priv, rps->efficient_freq)); + intel_gpu_freq(rps, rps->efficient_freq)); } else { seq_puts(m, "no P-state info available\n"); } @@ -992,91 +1011,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused) return ret; } -static void i915_instdone_info(struct drm_i915_private *dev_priv, - struct seq_file *m, - struct intel_instdone *instdone) -{ - int slice; - int subslice; - - seq_printf(m, "\t\tINSTDONE: 0x%08x\n", - instdone->instdone); - - if (INTEL_GEN(dev_priv) <= 3) - return; - - seq_printf(m, "\t\tSC_INSTDONE: 0x%08x\n", - instdone->slice_common); - - if (INTEL_GEN(dev_priv) <= 6) - return; - - for_each_instdone_slice_subslice(dev_priv, slice, subslice) - seq_printf(m, "\t\tSAMPLER_INSTDONE[%d][%d]: 0x%08x\n", - slice, subslice, instdone->sampler[slice][subslice]); - - for_each_instdone_slice_subslice(dev_priv, slice, subslice) - seq_printf(m, "\t\tROW_INSTDONE[%d][%d]: 0x%08x\n", - slice, subslice, instdone->row[slice][subslice]); -} - -static int i915_hangcheck_info(struct seq_file *m, void *unused) -{ - struct drm_i915_private *i915 = node_to_i915(m->private); - struct intel_gt *gt = &i915->gt; - struct intel_engine_cs *engine; - intel_wakeref_t wakeref; - enum intel_engine_id id; - - seq_printf(m, "Reset flags: %lx\n", gt->reset.flags); - if (test_bit(I915_WEDGED, &gt->reset.flags)) - seq_puts(m, "\tWedged\n"); - if (test_bit(I915_RESET_BACKOFF, &gt->reset.flags)) - seq_puts(m, "\tDevice (global) reset in progress\n"); - - if (!i915_modparams.enable_hangcheck) { - seq_puts(m, "Hangcheck disabled\n"); - return 0; - } - - if (timer_pending(&gt->hangcheck.work.timer)) - seq_printf(m, "Hangcheck active, timer fires in %dms\n", - jiffies_to_msecs(gt->hangcheck.work.timer.expires - - jiffies)); - else if (delayed_work_pending(&gt->hangcheck.work)) - seq_puts(m, "Hangcheck active, work pending\n"); - else - seq_puts(m, "Hangcheck inactive\n"); - - seq_printf(m, "GT
active? %s\n", yesno(gt->awake)); - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - for_each_engine(engine, i915, id) { - struct intel_instdone instdone; - - seq_printf(m, "%s: %d ms ago\n", - engine->name, - jiffies_to_msecs(jiffies - - engine->hangcheck.action_timestamp)); - - seq_printf(m, "\tACTHD = 0x%08llx [current 0x%08llx]\n", - (long long)engine->hangcheck.acthd, - intel_engine_get_active_head(engine)); - - intel_engine_get_instdone(engine, &instdone); - - seq_puts(m, "\tinstdone read =\n"); - i915_instdone_info(i915, m, &instdone); - - seq_puts(m, "\tinstdone accu =\n"); - i915_instdone_info(i915, m, - &engine->hangcheck.instdone); - } - } - - return 0; -} - static int ironlake_drpc_info(struct seq_file *m) { struct drm_i915_private *i915 = node_to_i915(m->private); @@ -1157,11 +1091,13 @@ static void print_rc6_res(struct seq_file *m, const char *title, const i915_reg_t reg) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); + struct drm_i915_private *i915 = node_to_i915(m->private); + intel_wakeref_t wakeref; - seq_printf(m, "%s %u (%llu us)\n", - title, I915_READ(reg), - intel_rc6_residency_us(dev_priv, reg)); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + seq_printf(m, "%s %u (%llu us)\n", title, + intel_uncore_read(&i915->uncore, reg), + intel_rc6_residency_us(&i915->gt.rc6, reg)); } static int vlv_drpc_info(struct seq_file *m) @@ -1439,7 +1375,7 @@ static int i915_sr_status(struct seq_file *m, void *unused) static int i915_ring_freq_table(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = &dev_priv->gt.rps; unsigned int max_gpu_freq, min_gpu_freq; intel_wakeref_t wakeref; int gpu_freq, ia_freq; @@ -1464,10 +1400,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) GEN6_PCODE_READ_MIN_FREQ_TABLE, &ia_freq, NULL); seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", - intel_gpu_freq(dev_priv, (gpu_freq * - (IS_GEN9_BC(dev_priv) || - INTEL_GEN(dev_priv) >= 10 ? - GEN9_FREQ_SCALER : 1))), + intel_gpu_freq(rps, + (gpu_freq * + (IS_GEN9_BC(dev_priv) || + INTEL_GEN(dev_priv) >= 10 ? 
+ GEN9_FREQ_SCALER : 1))), ((ia_freq >> 0) & 0xff) * 100, ((ia_freq >> 8) & 0xff) * 100); } @@ -1478,21 +1415,11 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) static int i915_opregion(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct intel_opregion *opregion = &dev_priv->opregion; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - goto out; + struct intel_opregion *opregion = &node_to_i915(m->private)->opregion; if (opregion->header) seq_write(m, opregion->header, OPREGION_SIZE); - mutex_unlock(&dev->struct_mutex); - -out: return 0; } @@ -1512,11 +1439,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) struct drm_device *dev = &dev_priv->drm; struct intel_framebuffer *fbdev_fb = NULL; struct drm_framebuffer *drm_fb; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; #ifdef CONFIG_DRM_FBDEV_EMULATION if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) { @@ -1551,7 +1473,6 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data) seq_putc(m, '\n'); } mutex_unlock(&dev->mode_config.fb_lock); - mutex_unlock(&dev->struct_mutex); return 0; } @@ -1564,23 +1485,20 @@ static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring) static int i915_context_status(struct seq_file *m, void *unused) { - struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct drm_device *dev = &dev_priv->drm; - struct i915_gem_context *ctx; - int ret; - - ret = mutex_lock_interruptible(&dev->struct_mutex); - if (ret) - return ret; + struct drm_i915_private *i915 = node_to_i915(m->private); + struct i915_gem_context *ctx, *cn; - list_for_each_entry(ctx, &dev_priv->contexts.list, link) { + spin_lock(&i915->gem.contexts.lock); + list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { struct i915_gem_engines_iter it; struct intel_context *ce; + if (!kref_get_unless_zero(&ctx->ref)) + continue; + + spin_unlock(&i915->gem.contexts.lock); + seq_puts(m, "HW context "); - if (!list_empty(&ctx->hw_id_link)) - seq_printf(m, "%x [pin %u]", ctx->hw_id, - atomic_read(&ctx->hw_id_pin_count)); if (ctx->pid) { struct task_struct *task; @@ -1614,9 +1532,12 @@ static int i915_context_status(struct seq_file *m, void *unused) i915_gem_context_unlock_engines(ctx); seq_putc(m, '\n'); - } - mutex_unlock(&dev->struct_mutex); + spin_lock(&i915->gem.contexts.lock); + list_safe_reset_next(ctx, cn, link); + i915_gem_context_put(ctx); + } + spin_unlock(&i915->gem.contexts.lock); return 0; } @@ -1654,9 +1575,9 @@ static int i915_swizzle_info(struct seq_file *m, void *data) wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "bit6 swizzle for X-tiling = %s\n", - swizzle_string(dev_priv->mm.bit_6_swizzle_x)); + swizzle_string(dev_priv->ggtt.bit_6_swizzle_x)); seq_printf(m, "bit6 swizzle for Y-tiling = %s\n", - swizzle_string(dev_priv->mm.bit_6_swizzle_y)); + swizzle_string(dev_priv->ggtt.bit_6_swizzle_y)); if (IS_GEN_RANGE(dev_priv, 3, 4)) { seq_printf(m, "DDC = 0x%08x\n", @@ -1711,7 +1632,7 @@ static const char *rps_power_to_str(unsigned int power) static int i915_rps_boost_info(struct seq_file *m, void *data) { struct drm_i915_private *dev_priv = node_to_i915(m->private); - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = &dev_priv->gt.rps; u32 act_freq = rps->cur_freq; intel_wakeref_t wakeref; @@ -1723,7 +1644,7 @@ static int 
i915_rps_boost_info(struct seq_file *m, void *data) vlv_punit_put(dev_priv); act_freq = (act_freq >> 8) & 0xff; } else { - act_freq = intel_get_cagf(dev_priv, + act_freq = intel_get_cagf(rps, I915_READ(GEN6_RPSTAT1)); } } @@ -1734,17 +1655,17 @@ static int i915_rps_boost_info(struct seq_file *m, void *data) atomic_read(&rps->num_waiters)); seq_printf(m, "Interactive? %d\n", READ_ONCE(rps->power.interactive)); seq_printf(m, "Frequency requested %d, actual %d\n", - intel_gpu_freq(dev_priv, rps->cur_freq), - intel_gpu_freq(dev_priv, act_freq)); + intel_gpu_freq(rps, rps->cur_freq), + intel_gpu_freq(rps, act_freq)); seq_printf(m, " min hard:%d, soft:%d; max soft:%d, hard:%d\n", - intel_gpu_freq(dev_priv, rps->min_freq), - intel_gpu_freq(dev_priv, rps->min_freq_softlimit), - intel_gpu_freq(dev_priv, rps->max_freq_softlimit), - intel_gpu_freq(dev_priv, rps->max_freq)); + intel_gpu_freq(rps, rps->min_freq), + intel_gpu_freq(rps, rps->min_freq_softlimit), + intel_gpu_freq(rps, rps->max_freq_softlimit), + intel_gpu_freq(rps, rps->max_freq)); seq_printf(m, " idle:%d, efficient:%d, boost:%d\n", - intel_gpu_freq(dev_priv, rps->idle_freq), - intel_gpu_freq(dev_priv, rps->efficient_freq), - intel_gpu_freq(dev_priv, rps->boost_freq)); + intel_gpu_freq(rps, rps->idle_freq), + intel_gpu_freq(rps, rps->efficient_freq), + intel_gpu_freq(rps, rps->boost_freq)); seq_printf(m, "Wait boosts: %d\n", atomic_read(&rps->boosts)); @@ -1860,8 +1781,8 @@ static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log = &dev_priv->gt.uc.guc.log; enum guc_log_buffer_type type; - if (!intel_guc_log_relay_enabled(log)) { - seq_puts(m, "GuC log relay disabled\n"); + if (!intel_guc_log_relay_created(log)) { + seq_puts(m, "GuC log relay not created\n"); return; } @@ -2048,9 +1969,23 @@ i915_guc_log_relay_write(struct file *filp, loff_t *ppos) { struct intel_guc_log *log = filp->private_data; + int val; + int ret; - intel_guc_log_relay_flush(log); - return cnt; + ret = kstrtoint_from_user(ubuf, cnt, 0, &val); + if (ret < 0) + return ret; + + /* + * Enable and start the guc log relay on value of 1. + * Flush log relay for any other value. 
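As a usage note for the reworked i915_guc_log_relay_write() above: the handler now parses an integer from the write buffer, starts the relay when the value is 1, and flushes it for any other value. Below is a minimal userspace sketch of poking that control file; the debugfs node name and mount point are assumptions for illustration (debugfs at /sys/kernel/debug, card 0), not something this patch defines.

/* Userspace sketch: start, then flush, the GuC log relay via debugfs.
 * Path and node name are assumptions; adjust for the actual system.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_str(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror(path);
                return -1;
        }
        if (write(fd, val, strlen(val)) < 0)
                perror("write");
        close(fd);
        return 0;
}

int main(void)
{
        const char *relay = "/sys/kernel/debug/dri/0/i915_guc_log_relay";

        write_str(relay, "1");  /* 1: create and start the relay */
        write_str(relay, "0");  /* any other value: flush the relay */
        return 0;
}
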
+ */ + if (val == 1) + ret = intel_guc_log_relay_start(log); + else + intel_guc_log_relay_flush(log); + + return ret ?: cnt; } static int i915_guc_log_relay_release(struct inode *inode, struct file *file) @@ -2133,7 +2068,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) "BUF_ON", "TG_ON" }; - val = I915_READ(EDP_PSR2_STATUS); + val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder)); status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >> EDP_PSR2_STATUS_STATE_SHIFT; if (status_val < ARRAY_SIZE(live_status)) @@ -2149,7 +2084,7 @@ psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m) "SRDOFFACK", "SRDENT_ON", }; - val = I915_READ(EDP_PSR_STATUS); + val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder)); status_val = (val & EDP_PSR_STATUS_STATE_MASK) >> EDP_PSR_STATUS_STATE_SHIFT; if (status_val < ARRAY_SIZE(live_status)) @@ -2188,14 +2123,18 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) status = "disabled"; seq_printf(m, "PSR mode: %s\n", status); - if (!psr->enabled) + if (!psr->enabled) { + seq_printf(m, "PSR sink not reliable: %s\n", + yesno(psr->sink_not_reliable)); + goto unlock; + } if (psr->psr2_enabled) { - val = I915_READ(EDP_PSR2_CTL); + val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)); enabled = val & EDP_PSR2_ENABLE; } else { - val = I915_READ(EDP_PSR_CTL); + val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)); enabled = val & EDP_PSR_ENABLE; } seq_printf(m, "Source PSR ctl: %s [0x%08x]\n", @@ -2208,7 +2147,8 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) * SKL+ Perf counter is reset to 0 everytime DC state is entered */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - val = I915_READ(EDP_PSR_PERF_CNT) & EDP_PSR_PERF_CNT_MASK; + val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder)); + val &= EDP_PSR_PERF_CNT_MASK; seq_printf(m, "Performance counter: %u\n", val); } @@ -2226,8 +2166,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) * Reading all 3 registers before hand to minimize crossing a * frame boundary between register reads */ - for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) - su_frames_val[frame / 3] = I915_READ(PSR2_SU_STATUS(frame)); + for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) { + val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder, + frame)); + su_frames_val[frame / 3] = val; + } seq_puts(m, "Frame:\tPSR2 SU blocks:\n"); @@ -2360,8 +2303,7 @@ static int i915_power_domain_info(struct seq_file *m, void *unused) for_each_power_domain(power_domain, power_well->desc->domains) seq_printf(m, " %-23s %d\n", - intel_display_power_domain_str(dev_priv, - power_domain), + intel_display_power_domain_str(power_domain), power_domains->domain_use_count[power_domain]); } @@ -2396,6 +2338,13 @@ static int i915_dmc_info(struct seq_file *m, void *unused) if (INTEL_GEN(dev_priv) >= 12) { dc5_reg = TGL_DMC_DEBUG_DC5_COUNT; dc6_reg = TGL_DMC_DEBUG_DC6_COUNT; + /* + * NOTE: DMC_DEBUG3 is a general purpose reg. + * According to B.Specs:49196 DMC f/w reuses DC5/6 counter + * reg for DC3CO debugging and validation, + * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter. + */ + seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3)); } else { dc5_reg = IS_BROXTON(dev_priv) ? 
BXT_CSR_DC3_DC5_COUNT : SKL_CSR_DC3_DC5_COUNT; @@ -3110,8 +3059,9 @@ static int i915_dp_mst_info(struct seq_file *m, void *unused) if (!intel_dig_port->dp.can_mst) continue; - seq_printf(m, "MST Source Port %c\n", - port_name(intel_dig_port->base.port)); + seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n", + intel_dig_port->base.base.base.id, + intel_dig_port->base.base.name); drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr); } drm_connector_list_iter_end(&conn_iter); @@ -3573,6 +3523,37 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, i915_wedged_get, i915_wedged_set, "%llu\n"); +static int +i915_perf_noa_delay_set(void *data, u64 val) +{ + struct drm_i915_private *i915 = data; + const u32 clk = RUNTIME_INFO(i915)->cs_timestamp_frequency_khz; + + /* + * This would lead to infinite waits as we're doing timestamp + * difference on the CS with only 32bits. + */ + if (val > mul_u32_u32(U32_MAX, clk)) + return -EINVAL; + + atomic64_set(&i915->perf.noa_programming_delay, val); + return 0; +} + +static int +i915_perf_noa_delay_get(void *data, u64 *val) +{ + struct drm_i915_private *i915 = data; + + *val = atomic64_read(&i915->perf.noa_programming_delay); + return 0; +} + +DEFINE_SIMPLE_ATTRIBUTE(i915_perf_noa_delay_fops, + i915_perf_noa_delay_get, + i915_perf_noa_delay_set, + "%llu\n"); + #define DROP_UNBOUND BIT(0) #define DROP_BOUND BIT(1) #define DROP_RETIRE BIT(2) @@ -3582,6 +3563,7 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, #define DROP_IDLE BIT(6) #define DROP_RESET_ACTIVE BIT(7) #define DROP_RESET_SEQNO BIT(8) +#define DROP_RCU BIT(9) #define DROP_ALL (DROP_UNBOUND | \ DROP_BOUND | \ DROP_RETIRE | \ @@ -3590,7 +3572,8 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_wedged_fops, DROP_SHRINK_ALL |\ DROP_IDLE | \ DROP_RESET_ACTIVE | \ - DROP_RESET_SEQNO) + DROP_RESET_SEQNO | \ + DROP_RCU) static int i915_drop_caches_get(void *data, u64 *val) { @@ -3598,58 +3581,48 @@ i915_drop_caches_get(void *data, u64 *val) return 0; } - static int -i915_drop_caches_set(void *data, u64 val) +gt_drop_caches(struct intel_gt *gt, u64 val) { - struct drm_i915_private *i915 = data; - - DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", - val, val & DROP_ALL); + int ret; if (val & DROP_RESET_ACTIVE && - wait_for(intel_engines_are_idle(&i915->gt), - I915_IDLE_ENGINES_TIMEOUT)) - intel_gt_set_wedged(&i915->gt); + wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT)) + intel_gt_set_wedged(gt); - /* No need to check and wait for gpu resets, only libdrm auto-restarts - * on ioctls on -EAGAIN. */ - if (val & (DROP_ACTIVE | DROP_IDLE | DROP_RETIRE | DROP_RESET_SEQNO)) { - int ret; + if (val & DROP_RETIRE) + intel_gt_retire_requests(gt); - ret = mutex_lock_interruptible(&i915->drm.struct_mutex); + if (val & (DROP_IDLE | DROP_ACTIVE)) { + ret = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); if (ret) return ret; + } - /* - * To finish the flush of the idle_worker, we must complete - * the switch-to-kernel-context, which requires a double - * pass through wait_for_idle: first queues the switch, - * second waits for the switch. 
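The DROP_* bits above (including the new DROP_RCU) form the mask userspace writes to the i915_gem_drop_caches debugfs file that i915_drop_caches_set() decodes. A minimal userspace sketch follows; only bits visible in this hunk are used, and the debugfs path is an assumption for illustration.

/* Userspace sketch: ask i915 to drop selected caches via debugfs.
 * Bit values are taken from the DROP_* definitions above; the path is an
 * assumption (debugfs mounted at /sys/kernel/debug, card 0).
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#define DROP_UNBOUND            (1u << 0)
#define DROP_BOUND              (1u << 1)
#define DROP_RETIRE             (1u << 2)
#define DROP_IDLE               (1u << 6)
#define DROP_RESET_ACTIVE       (1u << 7)
#define DROP_RESET_SEQNO        (1u << 8)
#define DROP_RCU                (1u << 9)      /* new in this patch */

int main(void)
{
        unsigned int mask = DROP_RETIRE | DROP_IDLE | DROP_RCU;
        char buf[32];
        int fd, n;

        fd = open("/sys/kernel/debug/dri/0/i915_gem_drop_caches", O_WRONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        n = snprintf(buf, sizeof(buf), "0x%x", mask);
        if (write(fd, buf, n) < 0)
                perror("write");
        close(fd);
        return 0;
}
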
- */ - if (ret == 0 && val & (DROP_IDLE | DROP_ACTIVE)) - ret = i915_gem_wait_for_idle(i915, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); + if (val & DROP_IDLE) { + ret = intel_gt_pm_wait_for_idle(gt); + if (ret) + return ret; + } - if (ret == 0 && val & DROP_IDLE) - ret = i915_gem_wait_for_idle(i915, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); + if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(gt)) + intel_gt_handle_error(gt, ALL_ENGINES, 0, NULL); - if (val & DROP_RETIRE) - i915_retire_requests(i915); + return 0; +} - mutex_unlock(&i915->drm.struct_mutex); +static int +i915_drop_caches_set(void *data, u64 val) +{ + struct drm_i915_private *i915 = data; + int ret; - if (ret == 0 && val & DROP_IDLE) - ret = intel_gt_pm_wait_for_idle(&i915->gt); - } + DRM_DEBUG("Dropping caches: 0x%08llx [0x%08llx]\n", + val, val & DROP_ALL); - if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt)) - intel_gt_handle_error(&i915->gt, ALL_ENGINES, 0, NULL); + ret = gt_drop_caches(&i915->gt, val); + if (ret) + return ret; fs_reclaim_acquire(GFP_KERNEL); if (val & DROP_BOUND) @@ -3662,10 +3635,8 @@ i915_drop_caches_set(void *data, u64 val) i915_gem_shrink_all(i915); fs_reclaim_release(GFP_KERNEL); - if (val & DROP_IDLE) { - flush_delayed_work(&i915->gem.retire_work); - flush_work(&i915->gem.idle_work); - } + if (val & DROP_RCU) + rcu_barrier(); if (val & DROP_FREED) i915_gem_drain_freed_objects(i915); @@ -3721,6 +3692,15 @@ i915_cache_sharing_set(void *data, u64 val) return 0; } +static void +intel_sseu_copy_subslices(const struct sseu_dev_info *sseu, int slice, + u8 *to_mask) +{ + int offset = slice * sseu->ss_stride; + + memcpy(&to_mask[offset], &sseu->subslice_mask[offset], sseu->ss_stride); +} + DEFINE_SIMPLE_ATTRIBUTE(i915_cache_sharing_fops, i915_cache_sharing_get, i915_cache_sharing_set, "%llu\n"); @@ -3794,12 +3774,13 @@ static void gen10_sseu_device_status(struct drm_i915_private *dev_priv, continue; sseu->slice_mask |= BIT(s); - sseu->subslice_mask[s] = info->sseu.subslice_mask[s]; + intel_sseu_copy_subslices(&info->sseu, s, sseu->subslice_mask); for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; - if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) + if (info->sseu.has_subslice_pg && + !(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) /* skip disabled subslice */ continue; @@ -3845,18 +3826,21 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, sseu->slice_mask |= BIT(s); if (IS_GEN9_BC(dev_priv)) - sseu->subslice_mask[s] = - RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; + intel_sseu_copy_subslices(&info->sseu, s, + sseu->subslice_mask); for (ss = 0; ss < info->sseu.max_subslices; ss++) { unsigned int eu_cnt; + u8 ss_idx = s * info->sseu.ss_stride + + ss / BITS_PER_BYTE; if (IS_GEN9_LP(dev_priv)) { if (!(s_reg[s] & (GEN9_PGCTL_SS_ACK(ss)))) /* skip disabled subslice */ continue; - sseu->subslice_mask[s] |= BIT(ss); + sseu->subslice_mask[ss_idx] |= + BIT(ss % BITS_PER_BYTE); } eu_cnt = 2 * hweight32(eu_reg[2*s + ss/2] & @@ -3873,25 +3857,23 @@ static void gen9_sseu_device_status(struct drm_i915_private *dev_priv, static void broadwell_sseu_device_status(struct drm_i915_private *dev_priv, struct sseu_dev_info *sseu) { + const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); int s; sseu->slice_mask = slice_info & GEN8_LSLICESTAT_MASK; if (sseu->slice_mask) { - sseu->eu_per_subslice = - RUNTIME_INFO(dev_priv)->sseu.eu_per_subslice; - for (s = 
0; s < fls(sseu->slice_mask); s++) { - sseu->subslice_mask[s] = - RUNTIME_INFO(dev_priv)->sseu.subslice_mask[s]; - } + sseu->eu_per_subslice = info->sseu.eu_per_subslice; + for (s = 0; s < fls(sseu->slice_mask); s++) + intel_sseu_copy_subslices(&info->sseu, s, + sseu->subslice_mask); sseu->eu_total = sseu->eu_per_subslice * intel_sseu_subslice_total(sseu); /* subtract fused off EU(s) from enabled slice(s) */ for (s = 0; s < fls(sseu->slice_mask); s++) { - u8 subslice_7eu = - RUNTIME_INFO(dev_priv)->sseu.subslice_7eu[s]; + u8 subslice_7eu = info->sseu.subslice_7eu[s]; sseu->eu_total -= hweight8(subslice_7eu); } @@ -3938,6 +3920,7 @@ static void i915_print_sseu_info(struct seq_file *m, bool is_available_info, static int i915_sseu_status(struct seq_file *m, void *unused) { struct drm_i915_private *dev_priv = node_to_i915(m->private); + const struct intel_runtime_info *info = RUNTIME_INFO(dev_priv); struct sseu_dev_info sseu; intel_wakeref_t wakeref; @@ -3945,14 +3928,13 @@ static int i915_sseu_status(struct seq_file *m, void *unused) return -ENODEV; seq_puts(m, "SSEU Device Info\n"); - i915_print_sseu_info(m, true, &RUNTIME_INFO(dev_priv)->sseu); + i915_print_sseu_info(m, true, &info->sseu); seq_puts(m, "SSEU Device Status\n"); memset(&sseu, 0, sizeof(sseu)); - sseu.max_slices = RUNTIME_INFO(dev_priv)->sseu.max_slices; - sseu.max_subslices = RUNTIME_INFO(dev_priv)->sseu.max_subslices; - sseu.max_eus_per_subslice = - RUNTIME_INFO(dev_priv)->sseu.max_eus_per_subslice; + intel_sseu_set_info(&sseu, info->sseu.max_slices, + info->sseu.max_subslices, + info->sseu.max_eus_per_subslice); with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { if (IS_CHERRYVIEW(dev_priv)) @@ -3973,13 +3955,12 @@ static int i915_sseu_status(struct seq_file *m, void *unused) static int i915_forcewake_open(struct inode *inode, struct file *file) { struct drm_i915_private *i915 = inode->i_private; + struct intel_gt *gt = &i915->gt; - if (INTEL_GEN(i915) < 6) - return 0; - - file->private_data = - (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm); - intel_uncore_forcewake_user_get(&i915->uncore); + atomic_inc(>->user_wakeref); + intel_gt_pm_get(gt); + if (INTEL_GEN(i915) >= 6) + intel_uncore_forcewake_user_get(gt->uncore); return 0; } @@ -3987,13 +3968,12 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) static int i915_forcewake_release(struct inode *inode, struct file *file) { struct drm_i915_private *i915 = inode->i_private; + struct intel_gt *gt = &i915->gt; - if (INTEL_GEN(i915) < 6) - return 0; - - intel_uncore_forcewake_user_put(&i915->uncore); - intel_runtime_pm_put(&i915->runtime_pm, - (intel_wakeref_t)(uintptr_t)file->private_data); + if (INTEL_GEN(i915) >= 6) + intel_uncore_forcewake_user_put(&i915->uncore); + intel_gt_pm_put(gt); + atomic_dec(>->user_wakeref); return 0; } @@ -4302,7 +4282,6 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_guc_stage_pool", i915_guc_stage_pool, 0}, {"i915_huc_load_status", i915_huc_load_status_info, 0}, {"i915_frequency_info", i915_frequency_info, 0}, - {"i915_hangcheck_info", i915_hangcheck_info, 0}, {"i915_drpc_info", i915_drpc_info, 0}, {"i915_ring_freq_table", i915_ring_freq_table, 0}, {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0}, @@ -4339,6 +4318,7 @@ static const struct i915_debugfs_files { const char *name; const struct file_operations *fops; } i915_debugfs_files[] = { + {"i915_perf_noa_delay", &i915_perf_noa_delay_fops}, {"i915_wedged", &i915_wedged_fops}, {"i915_cache_sharing", &i915_cache_sharing_fops}, 
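The sseu rework above stores subslice masks as a flat byte array indexed by s * ss_stride + ss / BITS_PER_BYTE, with intel_sseu_copy_subslices() copying one slice's bytes at a time. The standalone sketch below mirrors that layout; the sizes and helper names are illustrative, not the driver's.

/* Standalone sketch of the per-slice strided bitmask layout used above:
 * slice s starts at byte s * stride, subslice ss is bit (ss % 8) of byte
 * (ss / 8) within that slice.  Sizes and names are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_SLICES      3
#define SS_STRIDE       2       /* bytes per slice: up to 16 subslices */

static void set_subslice(uint8_t *mask, int slice, int ss)
{
        mask[slice * SS_STRIDE + ss / 8] |= 1u << (ss % 8);
}

static int has_subslice(const uint8_t *mask, int slice, int ss)
{
        return mask[slice * SS_STRIDE + ss / 8] & (1u << (ss % 8));
}

/* analogous to intel_sseu_copy_subslices(): copy one slice's bytes */
static void copy_slice(const uint8_t *from, int slice, uint8_t *to)
{
        memcpy(&to[slice * SS_STRIDE], &from[slice * SS_STRIDE], SS_STRIDE);
}

int main(void)
{
        uint8_t hw[MAX_SLICES * SS_STRIDE] = { 0 };
        uint8_t status[MAX_SLICES * SS_STRIDE] = { 0 };

        set_subslice(hw, 1, 9);         /* slice 1, subslice 9 present */
        copy_slice(hw, 1, status);
        printf("slice 1 ss 9: %d\n", !!has_subslice(status, 1, 9));
        return 0;
}
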
{"i915_gem_drop_caches", &i915_drop_caches_fops}, @@ -4528,7 +4508,7 @@ static int i915_dsc_fec_support_show(struct seq_file *m, void *data) intel_dp = enc_to_intel_dp(&intel_attached_encoder(connector)->base); crtc_state = to_intel_crtc_state(crtc->state); seq_printf(m, "DSC_Enabled: %s\n", - yesno(crtc_state->dsc_params.compression_enable)); + yesno(crtc_state->dsc.compression_enable)); seq_printf(m, "DSC_Sink_Support: %s\n", yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd))); seq_printf(m, "Force_DSC_Enable: %s\n", diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 3d717e282908..3c512c571e60 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -36,7 +36,6 @@ #include <linux/pm_runtime.h> #include <linux/pnp.h> #include <linux/slab.h> -#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include <linux/vt.h> #include <acpi/video.h> @@ -54,16 +53,17 @@ #include "display/intel_display_types.h" #include "display/intel_dp.h" #include "display/intel_fbdev.h" -#include "display/intel_gmbus.h" #include "display/intel_hotplug.h" #include "display/intel_overlay.h" #include "display/intel_pipe_crc.h" #include "display/intel_sprite.h" +#include "display/intel_vga.h" #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_rc6.h" #include "i915_debugfs.h" #include "i915_drv.h" @@ -72,10 +72,12 @@ #include "i915_perf.h" #include "i915_query.h" #include "i915_suspend.h" +#include "i915_switcheroo.h" #include "i915_sysfs.h" #include "i915_trace.h" #include "i915_vgpu.h" #include "intel_csr.h" +#include "intel_memory_region.h" #include "intel_pm.h" static struct drm_driver driver; @@ -269,179 +271,97 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv) release_resource(&dev_priv->mch_res); } -/* true = enable decode, false = disable decoder */ -static unsigned int i915_vga_set_decode(void *cookie, bool state) +static int i915_driver_modeset_probe(struct drm_i915_private *i915) { - struct drm_i915_private *dev_priv = cookie; - - intel_modeset_vga_set_state(dev_priv, state); - if (state) - return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM | - VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; - else - return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; -} - -static int i915_resume_switcheroo(struct drm_i915_private *i915); -static int i915_suspend_switcheroo(struct drm_i915_private *i915, - pm_message_t state); - -static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) -{ - struct drm_i915_private *i915 = pdev_to_i915(pdev); - pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; - - if (!i915) { - dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n"); - return; - } - - if (state == VGA_SWITCHEROO_ON) { - pr_info("switched on\n"); - i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; - /* i915 resume handler doesn't set to D0 */ - pci_set_power_state(pdev, PCI_D0); - i915_resume_switcheroo(i915); - i915->drm.switch_power_state = DRM_SWITCH_POWER_ON; - } else { - pr_info("switched off\n"); - i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; - i915_suspend_switcheroo(i915, pmm); - i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF; - } -} - -static bool i915_switcheroo_can_switch(struct pci_dev *pdev) -{ - struct drm_i915_private *i915 = pdev_to_i915(pdev); - - /* - * FIXME: open_count is protected by drm_global_mutex but that would lead to - * locking inversion with the driver load path. 
And the access here is - * completely racy anyway. So don't bother with locking for now. - */ - return i915 && i915->drm.open_count == 0; -} - -static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { - .set_gpu_state = i915_switcheroo_set_state, - .reprobe = NULL, - .can_switch = i915_switcheroo_can_switch, -}; - -static int i915_driver_modeset_probe(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = to_i915(dev); - struct pci_dev *pdev = dev_priv->drm.pdev; int ret; - if (i915_inject_probe_failure(dev_priv)) + if (i915_inject_probe_failure(i915)) return -ENODEV; - if (HAS_DISPLAY(dev_priv)) { - ret = drm_vblank_init(&dev_priv->drm, - INTEL_INFO(dev_priv)->num_pipes); + if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) { + ret = drm_vblank_init(&i915->drm, + INTEL_NUM_PIPES(i915)); if (ret) goto out; } - intel_bios_init(dev_priv); + intel_bios_init(i915); - /* If we have > 1 VGA cards, then we need to arbitrate access - * to the common VGA resources. - * - * If we are a secondary display controller (!PCI_DISPLAY_CLASS_VGA), - * then we do not take part in VGA arbitration and the - * vga_client_register() fails with -ENODEV. - */ - ret = vga_client_register(pdev, dev_priv, NULL, i915_vga_set_decode); - if (ret && ret != -ENODEV) + ret = intel_vga_register(i915); + if (ret) goto out; intel_register_dsm_handler(); - ret = vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false); + ret = i915_switcheroo_register(i915); if (ret) goto cleanup_vga_client; - intel_power_domains_init_hw(dev_priv, false); + intel_power_domains_init_hw(i915, false); - intel_csr_ucode_init(dev_priv); + intel_csr_ucode_init(i915); - ret = intel_irq_install(dev_priv); + ret = intel_irq_install(i915); if (ret) goto cleanup_csr; - intel_gmbus_setup(dev_priv); - /* Important: The output setup functions called by modeset_init need * working irqs for e.g. gmbus and dp aux transfers. */ - ret = intel_modeset_init(dev); + ret = intel_modeset_init(i915); if (ret) goto cleanup_irq; - ret = i915_gem_init(dev_priv); + ret = i915_gem_init(i915); if (ret) goto cleanup_modeset; - intel_overlay_setup(dev_priv); + intel_overlay_setup(i915); - if (!HAS_DISPLAY(dev_priv)) + if (!HAS_DISPLAY(i915) || !INTEL_DISPLAY_ENABLED(i915)) return 0; - ret = intel_fbdev_init(dev); + ret = intel_fbdev_init(&i915->drm); if (ret) goto cleanup_gem; /* Only enable hotplug handling once the fbdev is fully set up. 
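The rewritten probe path here keeps the usual kernel unwind-ladder style: each failing step jumps to a label that tears down everything already set up, in reverse order, so a single exit path covers both success and every partial-failure case. A generic sketch of the pattern, with stand-in step functions rather than i915 code:

/* Generic sketch of the goto-unwind error handling used by the probe
 * functions in this file: undo completed steps in reverse order on failure.
 * The step functions are stand-ins, not driver code.
 */
#include <stdio.h>

static int step_a(void) { puts("a: init"); return 0; }
static int step_b(void) { puts("b: init"); return 0; }
static int step_c(void) { puts("c: init"); return -1; }  /* simulated failure */
static void undo_a(void) { puts("a: undo"); }
static void undo_b(void) { puts("b: undo"); }

static int probe(void)
{
        int ret;

        ret = step_a();
        if (ret)
                goto out;

        ret = step_b();
        if (ret)
                goto cleanup_a;

        ret = step_c();
        if (ret)
                goto cleanup_b;

        return 0;

cleanup_b:
        undo_b();
cleanup_a:
        undo_a();
out:
        return ret;
}

int main(void)
{
        return probe() ? 1 : 0;
}
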
*/ - intel_hpd_init(dev_priv); + intel_hpd_init(i915); - intel_init_ipc(dev_priv); + intel_init_ipc(i915); return 0; cleanup_gem: - i915_gem_suspend(dev_priv); - i915_gem_driver_remove(dev_priv); - i915_gem_driver_release(dev_priv); + i915_gem_suspend(i915); + i915_gem_driver_remove(i915); + i915_gem_driver_release(i915); cleanup_modeset: - intel_modeset_driver_remove(dev); + intel_modeset_driver_remove(i915); cleanup_irq: - intel_irq_uninstall(dev_priv); - intel_gmbus_teardown(dev_priv); + intel_irq_uninstall(i915); cleanup_csr: - intel_csr_ucode_fini(dev_priv); - intel_power_domains_driver_remove(dev_priv); - vga_switcheroo_unregister_client(pdev); + intel_csr_ucode_fini(i915); + intel_power_domains_driver_remove(i915); + i915_switcheroo_unregister(i915); cleanup_vga_client: - vga_client_register(pdev, NULL, NULL, NULL); + intel_vga_unregister(i915); out: return ret; } -static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +static void i915_driver_modeset_remove(struct drm_i915_private *i915) { - struct apertures_struct *ap; - struct pci_dev *pdev = dev_priv->drm.pdev; - struct i915_ggtt *ggtt = &dev_priv->ggtt; - bool primary; - int ret; - - ap = alloc_apertures(1); - if (!ap) - return -ENOMEM; + intel_modeset_driver_remove(i915); - ap->ranges[0].base = ggtt->gmadr.start; - ap->ranges[0].size = ggtt->mappable_end; + intel_irq_uninstall(i915); - primary = - pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; + intel_bios_driver_remove(i915); - ret = drm_fb_helper_remove_conflicting_framebuffers(ap, "inteldrmfb", primary); + i915_switcheroo_unregister(i915); - kfree(ap); + intel_vga_unregister(i915); - return ret; + intel_csr_ucode_fini(i915); } static void intel_init_dpio(struct drm_i915_private *dev_priv) @@ -598,9 +518,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) intel_gt_init_early(&dev_priv->gt, dev_priv); - ret = i915_gem_init_early(dev_priv); - if (ret < 0) - goto err_gt; + i915_gem_init_early(dev_priv); /* This must be called before any calls to HAS_PCH_* */ intel_detect_pch(dev_priv); @@ -622,7 +540,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) err_gem: i915_gem_cleanup_early(dev_priv); -err_gt: intel_gt_driver_late_release(&dev_priv->gt); vlv_free_s0ix_state(dev_priv); err_workqueues: @@ -680,12 +597,10 @@ static int i915_driver_mmio_probe(struct drm_i915_private *dev_priv) intel_uc_init_mmio(&dev_priv->gt.uc); - ret = intel_engines_init_mmio(dev_priv); + ret = intel_engines_init_mmio(&dev_priv->gt); if (ret) goto err_uncore; - i915_gem_init_mmio(dev_priv); - return 0; err_uncore: @@ -703,7 +618,7 @@ err_bridge: */ static void i915_driver_mmio_release(struct drm_i915_private *dev_priv) { - intel_engines_cleanup(dev_priv); + intel_engines_cleanup(&dev_priv->gt); intel_teardown_mchbar(dev_priv); intel_uncore_fini_mmio(&dev_priv->uncore); pci_dev_put(dev_priv->bridge_dev); @@ -1157,8 +1072,8 @@ intel_get_dram_info(struct drm_i915_private *dev_priv) static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap) { - const unsigned int ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 }; - const unsigned int sets[4] = { 1, 1, 2, 2 }; + static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 }; + static const u8 sets[4] = { 1, 1, 2, 2 }; return EDRAM_NUM_BANKS(cap) * ways[EDRAM_WAYS_IDX(cap)] * @@ -1246,32 +1161,24 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) goto err_perf; - /* - * WARNING: Apparently we must kick fbdev drivers before vgacon, - * otherwise the 
vga fbdev driver falls over. - */ - ret = i915_kick_out_firmware_fb(dev_priv); - if (ret) { - DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb"); + if (ret) goto err_ggtt; - } - ret = vga_remove_vgacon(pdev); - if (ret) { - DRM_ERROR("failed to remove conflicting VGA console\n"); + ret = i915_ggtt_init_hw(dev_priv); + if (ret) goto err_ggtt; - } - ret = i915_ggtt_init_hw(dev_priv); + ret = intel_memory_regions_hw_probe(dev_priv); if (ret) goto err_ggtt; - intel_gt_init_hw(dev_priv); + intel_gt_init_hw_early(&dev_priv->gt, &dev_priv->ggtt); ret = i915_ggtt_enable_hw(dev_priv); if (ret) { DRM_ERROR("failed to enable GGTT\n"); - goto err_ggtt; + goto err_mem_regions; } pci_set_master(pdev); @@ -1288,7 +1195,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) { DRM_ERROR("failed to set DMA mask\n"); - goto err_ggtt; + goto err_mem_regions; } } @@ -1306,16 +1213,13 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv) if (ret) { DRM_ERROR("failed to set DMA mask\n"); - goto err_ggtt; + goto err_mem_regions; } } pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); - /* BIOS often leaves RC6 enabled, but disable it for hw init */ - intel_sanitize_gt_powersave(dev_priv); - intel_gt_init_workarounds(dev_priv); /* On the 945G/GM, the chipset reports the MSI capability on the @@ -1361,6 +1265,8 @@ err_msi: if (pdev->msi_enabled) pci_disable_msi(pdev); pm_qos_remove_request(&dev_priv->pm_qos); +err_mem_regions: + intel_memory_regions_driver_release(dev_priv); err_ggtt: i915_ggtt_driver_release(dev_priv); err_perf: @@ -1415,14 +1321,13 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) } else DRM_ERROR("Failed to register driver for userspace access!\n"); - if (HAS_DISPLAY(dev_priv)) { + if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) { /* Must be done after probing outputs */ intel_opregion_register(dev_priv); acpi_video_register(); } - if (IS_GEN(dev_priv, 5)) - intel_gpu_ips_init(dev_priv); + intel_gt_driver_register(&dev_priv->gt); intel_audio_init(dev_priv); @@ -1439,7 +1344,7 @@ static void i915_driver_register(struct drm_i915_private *dev_priv) * We need to coordinate the hotplugs with the asynchronous fbdev * configuration, for which we use the fbdev->async_cookie. */ - if (HAS_DISPLAY(dev_priv)) + if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) drm_kms_helper_poll_init(dev); intel_power_domains_enable(dev_priv); @@ -1465,7 +1370,7 @@ static void i915_driver_unregister(struct drm_i915_private *dev_priv) */ drm_kms_helper_poll_fini(&dev_priv->drm); - intel_gpu_ips_teardown(); + intel_gt_driver_unregister(&dev_priv->gt); acpi_video_unregister(); intel_opregion_unregister(dev_priv); @@ -1574,6 +1479,23 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!i915_modparams.nuclear_pageflip && match_info->gen < 5) dev_priv->drm.driver_features &= ~DRIVER_ATOMIC; + /* + * Check if we support fake LMEM -- for now we only unleash this for + * the live selftests(test-and-exit). 
+ */ +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) + if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) { + if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 && + i915_modparams.fake_lmem_start) { + mkwrite_device_info(dev_priv)->memory_regions = + REGION_SMEM | REGION_LMEM | REGION_STOLEN; + mkwrite_device_info(dev_priv)->is_dgfx = true; + GEM_BUG_ON(!HAS_LMEM(dev_priv)); + GEM_BUG_ON(!IS_DGFX(dev_priv)); + } + } +#endif + ret = pci_enable_device(pdev); if (ret) goto out_fini; @@ -1594,7 +1516,7 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret < 0) goto out_cleanup_mmio; - ret = i915_driver_modeset_probe(&dev_priv->drm); + ret = i915_driver_modeset_probe(dev_priv); if (ret < 0) goto out_cleanup_hw; @@ -1608,10 +1530,8 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent) out_cleanup_hw: i915_driver_hw_remove(dev_priv); + intel_memory_regions_driver_release(dev_priv); i915_ggtt_driver_release(dev_priv); - - /* Paranoia: make sure we have disabled everything before we exit. */ - intel_sanitize_gt_powersave(dev_priv); out_cleanup_mmio: i915_driver_mmio_release(dev_priv); out_runtime_pm_put: @@ -1627,8 +1547,6 @@ out_fini: void i915_driver_remove(struct drm_i915_private *i915) { - struct pci_dev *pdev = i915->drm.pdev; - disable_rpm_wakeref_asserts(&i915->runtime_pm); i915_driver_unregister(i915); @@ -1649,19 +1567,9 @@ void i915_driver_remove(struct drm_i915_private *i915) intel_gvt_driver_remove(i915); - intel_modeset_driver_remove(&i915->drm); - - intel_bios_driver_remove(i915); + i915_driver_modeset_remove(i915); - vga_switcheroo_unregister_client(pdev); - vga_client_register(pdev, NULL, NULL, NULL); - - intel_csr_ucode_fini(i915); - - /* Free error state after interrupts are fully disabled. */ - cancel_delayed_work_sync(&i915->gt.hangcheck.work); i915_reset_error_state(i915); - i915_gem_driver_remove(i915); intel_power_domains_driver_remove(i915); @@ -1680,11 +1588,9 @@ static void i915_driver_release(struct drm_device *dev) i915_gem_driver_release(dev_priv); + intel_memory_regions_driver_release(dev_priv); i915_ggtt_driver_release(dev_priv); - /* Paranoia: make sure we have disabled everything before we exit. 
*/ - intel_sanitize_gt_powersave(dev_priv); - i915_driver_mmio_release(dev_priv); enable_rpm_wakeref_asserts(rpm); @@ -1728,12 +1634,10 @@ static void i915_driver_postclose(struct drm_device *dev, struct drm_file *file) { struct drm_i915_file_private *file_priv = file->driver_priv; - mutex_lock(&dev->struct_mutex); i915_gem_context_close(file); i915_gem_release(dev, file); - mutex_unlock(&dev->struct_mutex); - kfree(file_priv); + kfree_rcu(file_priv, rcu); /* Catch up with all the deferred frees from "this" client */ i915_gem_flush_free_objects(to_i915(dev)); @@ -1847,8 +1751,6 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation) i915_gem_suspend_late(dev_priv); - i915_rc6_ctx_wa_suspend(dev_priv); - intel_uncore_suspend(&dev_priv->uncore); intel_power_domains_suspend(dev_priv, @@ -1890,8 +1792,7 @@ out: return ret; } -static int -i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) +int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state) { int error; @@ -1915,18 +1816,17 @@ static int i915_drm_resume(struct drm_device *dev) int ret; disable_rpm_wakeref_asserts(&dev_priv->runtime_pm); - intel_sanitize_gt_powersave(dev_priv); - i915_gem_sanitize(dev_priv); + intel_rc6_ctx_wa_resume(&dev_priv->gt.rc6); + + intel_gt_sanitize(&dev_priv->gt, true); ret = i915_ggtt_enable_hw(dev_priv); if (ret) DRM_ERROR("failed to re-enable GGTT\n"); - mutex_lock(&dev_priv->drm.struct_mutex); i915_gem_restore_gtt_mappings(dev_priv); - i915_gem_restore_fences(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); + i915_gem_restore_fences(&dev_priv->ggtt); intel_csr_ucode_resume(dev_priv); @@ -1951,7 +1851,7 @@ static int i915_drm_resume(struct drm_device *dev) i915_gem_resume(dev_priv); - intel_modeset_init_hw(dev); + intel_modeset_init_hw(dev_priv); intel_init_clock_gating(dev_priv); spin_lock_irq(&dev_priv->irq_lock); @@ -2048,20 +1948,14 @@ static int i915_drm_resume_early(struct drm_device *dev) intel_display_power_resume_early(dev_priv); - intel_sanitize_gt_powersave(dev_priv); - intel_power_domains_resume(dev_priv); - i915_rc6_ctx_wa_resume(dev_priv); - - intel_gt_sanitize(&dev_priv->gt, true); - enable_rpm_wakeref_asserts(&dev_priv->runtime_pm); return ret; } -static int i915_resume_switcheroo(struct drm_i915_private *i915) +int i915_resume_switcheroo(struct drm_i915_private *i915) { int ret; @@ -2594,9 +2488,6 @@ static int intel_runtime_suspend(struct device *kdev) struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; int ret = 0; - if (WARN_ON_ONCE(!(dev_priv->gt_pm.rc6.enabled && HAS_RC6(dev_priv)))) - return -ENODEV; - if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv))) return -ENODEV; @@ -2629,7 +2520,7 @@ static int intel_runtime_suspend(struct device *kdev) intel_gt_runtime_resume(&dev_priv->gt); - i915_gem_restore_fences(dev_priv); + i915_gem_restore_fences(&dev_priv->ggtt); enable_rpm_wakeref_asserts(rpm); @@ -2709,7 +2600,7 @@ static int intel_runtime_resume(struct device *kdev) * we can do is to hope that things will still work (and disable RPM). 
*/ intel_gt_runtime_resume(&dev_priv->gt); - i915_gem_restore_fences(dev_priv); + i915_gem_restore_fences(&dev_priv->ggtt); /* * On VLV/CHV display interrupts are part of the display diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 89b6112bd66b..e29bc137e7ba 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -67,6 +67,7 @@ #include "display/intel_display.h" #include "display/intel_display_power.h" #include "display/intel_dpll_mgr.h" +#include "display/intel_dsb.h" #include "display/intel_frontbuffer.h" #include "display/intel_gmbus.h" #include "display/intel_opregion.h" @@ -84,6 +85,7 @@ #include "intel_device_info.h" #include "intel_pch.h" #include "intel_runtime_pm.h" +#include "intel_memory_region.h" #include "intel_uncore.h" #include "intel_wakeref.h" #include "intel_wopcm.h" @@ -92,12 +94,15 @@ #include "i915_gem_fence_reg.h" #include "i915_gem_gtt.h" #include "i915_gpu_error.h" +#include "i915_perf_types.h" #include "i915_request.h" #include "i915_scheduler.h" #include "gt/intel_timeline.h" #include "i915_vma.h" #include "i915_irq.h" +#include "intel_region_lmem.h" + #include "intel_gvt.h" /* General customization: @@ -105,8 +110,8 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20190822" -#define DRIVER_TIMESTAMP 1566477988 +#define DRIVER_DATE "20191101" +#define DRIVER_TIMESTAMP 1572604873 struct drm_i915_gem_object; @@ -185,7 +190,11 @@ struct i915_mmu_object; struct drm_i915_file_private { struct drm_i915_private *dev_priv; - struct drm_file *file; + + union { + struct drm_file *file; + struct rcu_head rcu; + }; struct { spinlock_t lock; @@ -272,6 +281,7 @@ struct drm_i915_display_funcs { int (*compute_global_watermarks)(struct intel_atomic_state *state); void (*update_wm)(struct intel_crtc *crtc); int (*modeset_calc_cdclk)(struct intel_atomic_state *state); + u8 (*calc_voltage_level)(int cdclk); /* Returns the active state of the crtc, and if the crtc is active, * fills out the pipe-config with the hw state. 
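The union added to drm_i915_file_private above pairs with the kfree_rcu(file_priv, rcu) call in i915_driver_postclose(): the rcu_head reuses the storage of the file pointer, which is dead by the time the object is queued for deferred freeing, so the struct does not grow. A plain-C sketch of the same overlay, using a hand-rolled deferred-free list in place of RCU (names invented, userspace only):

/* Plain-C analog of the union-with-rcu_head trick above: the deferred-free
 * node overlays a pointer that is no longer needed once freeing is queued.
 * This only illustrates the layout trick; real RCU deferral is kernel-side.
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct free_node {                      /* stand-in for struct rcu_head */
        struct free_node *next;
};

struct file_private {
        int id;
        union {
                void *file;             /* valid while the client is open */
                struct free_node node;  /* reused once queued for freeing */
        };
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct free_node *pending;

static void queue_free(struct file_private *fpriv)
{
        fpriv->node.next = pending;     /* overwrites ->file: it is dead now */
        pending = &fpriv->node;
}

static void drain_pending(void)
{
        while (pending) {
                struct free_node *n = pending;

                pending = n->next;
                free(container_of(n, struct file_private, node));
        }
}

int main(void)
{
        struct file_private *fpriv = calloc(1, sizeof(*fpriv));

        fpriv->id = 42;
        queue_free(fpriv);              /* like kfree_rcu(file_priv, rcu) */
        drain_pending();                /* like the end of a grace period */
        return 0;
}
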
*/ bool (*get_pipe_config)(struct intel_crtc *, @@ -284,7 +294,8 @@ struct drm_i915_display_funcs { struct intel_atomic_state *old_state); void (*crtc_disable)(struct intel_crtc_state *old_crtc_state, struct intel_atomic_state *old_state); - void (*update_crtcs)(struct intel_atomic_state *state); + void (*commit_modeset_enables)(struct intel_atomic_state *state); + void (*commit_modeset_disables)(struct intel_atomic_state *state); void (*audio_codec_enable)(struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); @@ -331,6 +342,7 @@ struct intel_csr { i915_reg_t mmioaddr[20]; u32 mmiodata[20]; u32 dc_state; + u32 target_dc_state; u32 allowed_dc_mask; intel_wakeref_t wakeref; }; @@ -479,6 +491,7 @@ struct i915_psr { bool enabled; struct intel_dp *dp; enum pipe pipe; + enum transcoder transcoder; bool active; struct work_struct work; unsigned busy_frontbuffer_bits; @@ -492,6 +505,9 @@ struct i915_psr { bool sink_not_reliable; bool irq_aux_error; u16 su_x_granularity; + bool dc3co_enabled; + u32 dc3co_exit_delay; + struct delayed_work idle_work; }; #define QUIRK_LVDS_SSC_DISABLE (1<<1) @@ -529,108 +545,6 @@ struct i915_suspend_saved_registers { struct vlv_s0ix_state; -struct intel_rps_ei { - ktime_t ktime; - u32 render_c0; - u32 media_c0; -}; - -struct intel_rps { - struct mutex lock; /* protects enabling and the worker */ - - /* - * work, interrupts_enabled and pm_iir are protected by - * dev_priv->irq_lock - */ - struct work_struct work; - bool interrupts_enabled; - u32 pm_iir; - - /* PM interrupt bits that should never be masked */ - u32 pm_intrmsk_mbz; - - /* Frequencies are stored in potentially platform dependent multiples. - * In other words, *_freq needs to be multiplied by X to be interesting. - * Soft limits are those which are used for the dynamic reclocking done - * by the driver (raise frequencies under heavy loads, and lower for - * lighter loads). Hard limits are those imposed by the hardware. - * - * A distinction is made for overclocking, which is never enabled by - * default, and is considered to be above the hard limit if it's - * possible at all. - */ - u8 cur_freq; /* Current frequency (cached, may not == HW) */ - u8 min_freq_softlimit; /* Minimum frequency permitted by the driver */ - u8 max_freq_softlimit; /* Max frequency permitted by the driver */ - u8 max_freq; /* Maximum frequency, RP0 if not overclocking */ - u8 min_freq; /* AKA RPn. Minimum frequency */ - u8 boost_freq; /* Frequency to request when wait boosting */ - u8 idle_freq; /* Frequency to request when we are idle */ - u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ - u8 rp1_freq; /* "less than" RP0 power/freqency */ - u8 rp0_freq; /* Non-overclocked max frequency. 
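The comment in the struct intel_rps being moved out of i915_drv.h notes that frequencies are stored in platform-dependent hardware multiples and only become meaningful after conversion, which is what the intel_gpu_freq() calls in the debugfs hunks earlier do. A small sketch of that kind of conversion follows; treating gen9+ codes as raw * GT_FREQUENCY_MULTIPLIER / GEN9_FREQ_SCALER MHz (constants that appear elsewhere in this diff) is an assumption for illustration, not a statement about every platform's encoding.

/* Sketch: convert an RPS frequency code to MHz the way the gen9+ path is
 * commonly described (code * 50 / 3, rounded).  Illustrative only.
 */
#include <stdio.h>

#define GT_FREQUENCY_MULTIPLIER 50
#define GEN9_FREQ_SCALER        3

static unsigned int gen9_code_to_mhz(unsigned int code)
{
        /* round to nearest, like DIV_ROUND_CLOSEST() */
        return (code * GT_FREQUENCY_MULTIPLIER + GEN9_FREQ_SCALER / 2) /
               GEN9_FREQ_SCALER;
}

int main(void)
{
        unsigned int code;

        for (code = 18; code <= 66; code += 12)
                printf("code %u -> %u MHz\n", code, gen9_code_to_mhz(code));
        return 0;
}
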
*/ - u16 gpll_ref_freq; /* vlv/chv GPLL reference frequency */ - - int last_adj; - - struct { - struct mutex mutex; - - enum { LOW_POWER, BETWEEN, HIGH_POWER } mode; - unsigned int interactive; - - u8 up_threshold; /* Current %busy required to uplock */ - u8 down_threshold; /* Current %busy required to downclock */ - } power; - - bool enabled; - atomic_t num_waiters; - atomic_t boosts; - - /* manual wa residency calculations */ - struct intel_rps_ei ei; -}; - -struct intel_rc6 { - bool enabled; - bool ctx_corrupted; - intel_wakeref_t ctx_corrupted_wakeref; - u64 prev_hw_residency[4]; - u64 cur_residency[4]; -}; - -struct intel_llc_pstate { - bool enabled; -}; - -struct intel_gen6_power_mgmt { - struct intel_rps rps; - struct intel_rc6 rc6; - struct intel_llc_pstate llc_pstate; -}; - -/* defined intel_pm.c */ -extern spinlock_t mchdev_lock; - -struct intel_ilk_power_mgmt { - u8 cur_delay; - u8 min_delay; - u8 max_delay; - u8 fmax; - u8 fstart; - - u64 last_count1; - unsigned long last_time1; - unsigned long chipset_power; - u64 last_count2; - u64 last_time2; - unsigned long gfx_power; - u8 corr; - - int c_m; - int r_t; -}; - #define MAX_L3_SLICES 2 struct intel_l3_parity { u32 *remap_info[MAX_L3_SLICES]; @@ -679,6 +593,8 @@ struct i915_gem_mm { */ struct vfsmount *gemfs; + struct intel_memory_region *regions[INTEL_REGION_UNKNOWN]; + struct notifier_block oom_notifier; struct notifier_block vmap_notifier; struct shrinker shrinker; @@ -690,11 +606,6 @@ struct i915_gem_mm { */ struct workqueue_struct *userptr_wq; - /** Bit 6 swizzling required for X tiling */ - u32 bit_6_swizzle_x; - /** Bit 6 swizzling required for Y tiling */ - u32 bit_6_swizzle_y; - /* shrinker accounting, also useful for userland debugging */ u64 shrink_memory; u32 shrink_count; @@ -975,305 +886,6 @@ struct intel_wm_config { bool sprites_scaled; }; -struct i915_oa_format { - u32 format; - int size; -}; - -struct i915_oa_reg { - i915_reg_t addr; - u32 value; -}; - -struct i915_oa_config { - char uuid[UUID_STRING_LEN + 1]; - int id; - - const struct i915_oa_reg *mux_regs; - u32 mux_regs_len; - const struct i915_oa_reg *b_counter_regs; - u32 b_counter_regs_len; - const struct i915_oa_reg *flex_regs; - u32 flex_regs_len; - - struct attribute_group sysfs_metric; - struct attribute *attrs[2]; - struct device_attribute sysfs_metric_id; - - atomic_t ref_count; -}; - -struct i915_perf_stream; - -/** - * struct i915_perf_stream_ops - the OPs to support a specific stream type - */ -struct i915_perf_stream_ops { - /** - * @enable: Enables the collection of HW samples, either in response to - * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is opened - * without `I915_PERF_FLAG_DISABLED`. - */ - void (*enable)(struct i915_perf_stream *stream); - - /** - * @disable: Disables the collection of HW samples, either in response - * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying - * the stream. - */ - void (*disable)(struct i915_perf_stream *stream); - - /** - * @poll_wait: Call poll_wait, passing a wait queue that will be woken - * once there is something ready to read() for the stream - */ - void (*poll_wait)(struct i915_perf_stream *stream, - struct file *file, - poll_table *wait); - - /** - * @wait_unlocked: For handling a blocking read, wait until there is - * something to ready to read() for the stream. E.g. wait on the same - * wait queue that would be passed to poll_wait(). 
- */ - int (*wait_unlocked)(struct i915_perf_stream *stream); - - /** - * @read: Copy buffered metrics as records to userspace - * **buf**: the userspace, destination buffer - * **count**: the number of bytes to copy, requested by userspace - * **offset**: zero at the start of the read, updated as the read - * proceeds, it represents how many bytes have been copied so far and - * the buffer offset for copying the next record. - * - * Copy as many buffered i915 perf samples and records for this stream - * to userspace as will fit in the given buffer. - * - * Only write complete records; returning -%ENOSPC if there isn't room - * for a complete record. - * - * Return any error condition that results in a short read such as - * -%ENOSPC or -%EFAULT, even though these may be squashed before - * returning to userspace. - */ - int (*read)(struct i915_perf_stream *stream, - char __user *buf, - size_t count, - size_t *offset); - - /** - * @destroy: Cleanup any stream specific resources. - * - * The stream will always be disabled before this is called. - */ - void (*destroy)(struct i915_perf_stream *stream); -}; - -/** - * struct i915_perf_stream - state for a single open stream FD - */ -struct i915_perf_stream { - /** - * @dev_priv: i915 drm device - */ - struct drm_i915_private *dev_priv; - - /** - * @link: Links the stream into ``&drm_i915_private->streams`` - */ - struct list_head link; - - /** - * @wakeref: As we keep the device awake while the perf stream is - * active, we track our runtime pm reference for later release. - */ - intel_wakeref_t wakeref; - - /** - * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*` - * properties given when opening a stream, representing the contents - * of a single sample as read() by userspace. - */ - u32 sample_flags; - - /** - * @sample_size: Considering the configured contents of a sample - * combined with the required header size, this is the total size - * of a single sample record. - */ - int sample_size; - - /** - * @ctx: %NULL if measuring system-wide across all contexts or a - * specific context that is being monitored. - */ - struct i915_gem_context *ctx; - - /** - * @enabled: Whether the stream is currently enabled, considering - * whether the stream was opened in a disabled state and based - * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls. - */ - bool enabled; - - /** - * @ops: The callbacks providing the implementation of this specific - * type of configured stream. - */ - const struct i915_perf_stream_ops *ops; - - /** - * @oa_config: The OA configuration used by the stream. - */ - struct i915_oa_config *oa_config; - - /** - * The OA context specific information. - */ - struct intel_context *pinned_ctx; - u32 specific_ctx_id; - u32 specific_ctx_id_mask; - - struct hrtimer poll_check_timer; - wait_queue_head_t poll_wq; - bool pollin; - - bool periodic; - int period_exponent; - - /** - * State of the OA buffer. - */ - struct { - struct i915_vma *vma; - u8 *vaddr; - u32 last_ctx_id; - int format; - int format_size; - int size_exponent; - - /** - * Locks reads and writes to all head/tail state - * - * Consider: the head and tail pointer state needs to be read - * consistently from a hrtimer callback (atomic context) and - * read() fop (user context) with tail pointer updates happening - * in atomic context and head updates in user context and the - * (unlikely) possibility of read() errors needing to reset all - * head/tail state. 
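The @read documentation above (moved out of i915_drv.h along with the rest of the perf stream definitions) spells out the contract: copy only complete records and report -ENOSPC when not even one more fits. A standalone sketch of such a copy loop over in-memory buffers follows; the record layout and names are invented for illustration and are not the i915 implementation.

/* Sketch of a "whole records only" copy loop: stop before a partial record
 * and return -ENOSPC if nothing more fits.  Layout and names are invented.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int copy_records(const uint8_t *src, size_t src_len,
                        uint8_t *dst, size_t dst_len, size_t *offset)
{
        size_t pos = 0;
        int copied = 0;

        while (pos + sizeof(uint32_t) <= src_len) {
                uint32_t size;

                memcpy(&size, &src[pos], sizeof(size));   /* record header */
                if (size < sizeof(uint32_t) || pos + size > src_len)
                        break;                  /* malformed or truncated */
                if (*offset + size > dst_len)
                        return copied ? copied : -ENOSPC;

                memcpy(&dst[*offset], &src[pos], size);
                *offset += size;
                copied++;
                pos += size;
        }
        return copied;
}

int main(void)
{
        uint8_t src[64] = { 0 }, dst[16];
        size_t offset = 0;
        uint32_t size = 12;

        memcpy(&src[0], &size, sizeof(size));
        memcpy(&src[12], &size, sizeof(size));
        printf("copied %d record(s), %zu bytes\n",
               copy_records(src, 24, dst, sizeof(dst), &offset), offset);
        return 0;
}
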
- * - * Note: Contention/performance aren't currently a significant - * concern here considering the relatively low frequency of - * hrtimer callbacks (5ms period) and that reads typically only - * happen in response to a hrtimer event and likely complete - * before the next callback. - * - * Note: This lock is not held *while* reading and copying data - * to userspace so the value of head observed in htrimer - * callbacks won't represent any partial consumption of data. - */ - spinlock_t ptr_lock; - - /** - * One 'aging' tail pointer and one 'aged' tail pointer ready to - * used for reading. - * - * Initial values of 0xffffffff are invalid and imply that an - * update is required (and should be ignored by an attempted - * read) - */ - struct { - u32 offset; - } tails[2]; - - /** - * Index for the aged tail ready to read() data up to. - */ - unsigned int aged_tail_idx; - - /** - * A monotonic timestamp for when the current aging tail pointer - * was read; used to determine when it is old enough to trust. - */ - u64 aging_timestamp; - - /** - * Although we can always read back the head pointer register, - * we prefer to avoid trusting the HW state, just to avoid any - * risk that some hardware condition could * somehow bump the - * head pointer unpredictably and cause us to forward the wrong - * OA buffer data to userspace. - */ - u32 head; - } oa_buffer; -}; - -/** - * struct i915_oa_ops - Gen specific implementation of an OA unit stream - */ -struct i915_oa_ops { - /** - * @is_valid_b_counter_reg: Validates register's address for - * programming boolean counters for a particular platform. - */ - bool (*is_valid_b_counter_reg)(struct drm_i915_private *dev_priv, - u32 addr); - - /** - * @is_valid_mux_reg: Validates register's address for programming mux - * for a particular platform. - */ - bool (*is_valid_mux_reg)(struct drm_i915_private *dev_priv, u32 addr); - - /** - * @is_valid_flex_reg: Validates register's address for programming - * flex EU filtering for a particular platform. - */ - bool (*is_valid_flex_reg)(struct drm_i915_private *dev_priv, u32 addr); - - /** - * @enable_metric_set: Selects and applies any MUX configuration to set - * up the Boolean and Custom (B/C) counters that are part of the - * counter reports being sampled. May apply system constraints such as - * disabling EU clock gating as required. - */ - int (*enable_metric_set)(struct i915_perf_stream *stream); - - /** - * @disable_metric_set: Remove system constraints associated with using - * the OA unit. - */ - void (*disable_metric_set)(struct i915_perf_stream *stream); - - /** - * @oa_enable: Enable periodic sampling - */ - void (*oa_enable)(struct i915_perf_stream *stream); - - /** - * @oa_disable: Disable periodic sampling - */ - void (*oa_disable)(struct i915_perf_stream *stream); - - /** - * @read: Copy data from the circular OA buffer into a given userspace - * buffer. - */ - int (*read)(struct i915_perf_stream *stream, - char __user *buf, - size_t count, - size_t *offset); - - /** - * @oa_hw_tail_read: read the OA tail pointer register - * - * In particular this enables us to share all the fiddly code for - * handling the OA unit tail pointer race that affects multiple - * generations. 
- */ - u32 (*oa_hw_tail_read)(struct i915_perf_stream *stream); -}; - struct intel_cdclk_state { unsigned int cdclk, vco, ref, bypass; u8 voltage_level; @@ -1333,11 +945,11 @@ struct drm_i915_private { */ u32 gpio_mmio_base; + u32 hsw_psr_mmio_adjust; + /* MMIO base address for MIPI regs */ u32 mipi_mmio_base; - u32 psr_mmio_base; - u32 pps_mmio_base; wait_queue_head_t gmbus_wait_queue; @@ -1369,7 +981,6 @@ struct drm_i915_private { u32 irq_mask; u32 de_irq_mask[I915_MAX_PIPES]; }; - u32 pm_rps_events; u32 pipestat_irq_mask[I915_MAX_PIPES]; struct i915_hotplug hotplug; @@ -1399,13 +1010,14 @@ struct drm_i915_private { unsigned int fdi_pll_freq; unsigned int czclk_freq; + /* + * For reading holding any crtc lock is sufficient, + * for writing must hold all of them. + */ struct { /* * The current logical cdclk state. * See intel_atomic_state.cdclk.logical - * - * For reading holding any crtc lock is sufficient, - * for writing must hold all of them. */ struct intel_cdclk_state logical; /* @@ -1416,6 +1028,9 @@ struct drm_i915_private { /* The current hardware cdclk state */ struct intel_cdclk_state hw; + /* cdclk, divider, and ratio table from bspec */ + const struct intel_cdclk_vals *table; + int force_min_cdclk; } cdclk; @@ -1430,6 +1045,8 @@ struct drm_i915_private { /* ordered wq for modesets */ struct workqueue_struct *modeset_wq; + /* unbound hipri wq for page flips/plane updates */ + struct workqueue_struct *flip_wq; /* Display functions */ struct drm_i915_display_funcs display; @@ -1470,7 +1087,11 @@ struct drm_i915_private { */ struct mutex dpll_lock; - unsigned int active_crtcs; + /* + * For reading active_pipes, min_cdclk, min_voltage_level holding + * any crtc lock is sufficient, for writing must hold all of them. + */ + u8 active_pipes; /* minimum acceptable cdclk for each pipe */ int min_cdclk[I915_MAX_PIPES]; /* minimum acceptable voltage level for each pipe */ @@ -1499,13 +1120,6 @@ struct drm_i915_private { */ u32 edram_size_mb; - /* gen6+ GT PM state */ - struct intel_gen6_power_mgmt gt_pm; - - /* ilk-only ips/rps state. Everything in here is protected by the global - * mchdev_lock in intel_pm.c */ - struct intel_ilk_power_mgmt ips; - struct i915_power_domains power_domains; struct i915_psr psr; @@ -1530,25 +1144,7 @@ struct drm_i915_private { */ struct mutex av_mutex; int audio_power_refcount; - - struct { - struct mutex mutex; - struct list_head list; - struct llist_head free_list; - struct work_struct free_work; - - /* The hw wants to have a stable context identifier for the - * lifetime of the context (for OA, PASID, faults, etc). - * This is limited in execlists to 21 bits. - */ - struct ida hw_ida; -#define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */ -#define MAX_GUC_CONTEXT_HW_ID (1 << 20) /* exclusive */ -#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */ -/* in Gen12 ID 0x7FF is reserved to indicate idle */ -#define GEN12_MAX_CONTEXT_HW_ID (GEN11_MAX_CONTEXT_HW_ID - 1) - struct list_head hw_id_list; - } contexts; + u32 audio_freq_cntrl; u32 fdi_rx_config; @@ -1574,6 +1170,8 @@ struct drm_i915_private { I915_SAGV_NOT_CONTROLLED } sagv_status; + u32 sagv_block_time_us; + struct { /* * Raw watermark latency values: @@ -1644,61 +1242,7 @@ struct drm_i915_private { struct intel_runtime_pm runtime_pm; - struct { - bool initialized; - - struct kobject *metrics_kobj; - struct ctl_table_header *sysctl_header; - - /* - * Lock associated with adding/modifying/removing OA configs - * in dev_priv->perf.metrics_idr. 
- */ - struct mutex metrics_lock; - - /* - * List of dynamic configurations, you need to hold - * dev_priv->perf.metrics_lock to access it. - */ - struct idr metrics_idr; - - /* - * Lock associated with anything below within this structure - * except exclusive_stream. - */ - struct mutex lock; - struct list_head streams; - - /* - * The stream currently using the OA unit. If accessed - * outside a syscall associated to its file - * descriptor, you need to hold - * dev_priv->drm.struct_mutex. - */ - struct i915_perf_stream *exclusive_stream; - - /** - * For rate limiting any notifications of spurious - * invalid OA reports - */ - struct ratelimit_state spurious_report_rs; - - struct i915_oa_config test_config; - - u32 gen7_latched_oastatus1; - u32 ctx_oactxctrl_offset; - u32 ctx_flexeu0_offset; - - /** - * The RPT_ID/reason field for Gen8+ includes a bit - * to determine if the CTX ID in the report is valid - * but the specific bit differs between Gen 8 and 9 - */ - u32 gen8_valid_ctx_bit; - - struct i915_oa_ops ops; - const struct i915_oa_format *oa_formats; - } perf; + struct i915_perf perf; /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ struct intel_gt gt; @@ -1706,34 +1250,19 @@ struct drm_i915_private { struct { struct notifier_block pm_notifier; - /** - * We leave the user IRQ off as much as possible, - * but this means that requests will finish and never - * be retired once the system goes idle. Set a timer to - * fire periodically while the ring is running. When it - * fires, go retire requests. - */ - struct delayed_work retire_work; - - /** - * When we detect an idle GPU, we want to turn on - * powersaving features. So once we see that there - * are no more requests outstanding and no more - * arrive within a small period of time, we fire - * off the idle_work. - */ - struct work_struct idle_work; + struct i915_gem_contexts { + spinlock_t lock; /* locks list */ + struct list_head list; + + struct llist_head free_list; + struct work_struct free_work; + } contexts; } gem; u8 pch_ssc_use; - /* For i945gm vblank irq vs. C3 workaround */ - struct { - struct work_struct work; - struct pm_qos_request pm_qos; - u8 c3_disable_latency; - u8 enabled; - } i945gm_vblank; + /* For i915gm/i945gm vblank irq workaround */ + u8 vblank_enabled; /* perform PHY state sanity checks? */ bool chv_phy_assert[2]; @@ -1796,10 +1325,10 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) for_each_if ((engine__) = (dev_priv__)->engine[(id__)]) /* Iterator over subset of engines selected by mask */ -#define for_each_engine_masked(engine__, dev_priv__, mask__, tmp__) \ - for ((tmp__) = (mask__) & INTEL_INFO(dev_priv__)->engine_mask; \ +#define for_each_engine_masked(engine__, gt__, mask__, tmp__) \ + for ((tmp__) = (mask__) & INTEL_INFO((gt__)->i915)->engine_mask; \ (tmp__) ? \ - ((engine__) = (dev_priv__)->engine[__mask_next_bit(tmp__)]), 1 : \ + ((engine__) = (gt__)->engine[__mask_next_bit(tmp__)]), 1 : \ 0;) #define rb_to_uabi_engine(rb) \ @@ -1855,6 +1384,8 @@ static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev) (BUILD_BUG_ON_ZERO(!__builtin_constant_p(n)) + \ INTEL_INFO(dev_priv)->gen == (n)) +#define HAS_DSB(dev_priv) (INTEL_INFO(dev_priv)->display.has_dsb) + /* * Return true if revision is in range [since,until] inclusive. 
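The updated for_each_engine_masked() above walks a gt's engines by repeatedly taking the lowest set bit of the mask (via __mask_next_bit) and clearing it. A standalone sketch of that bit-walking loop; the helper name and engine names below are illustrative, not taken from the driver.

/* Sketch of the bit-walk behind for_each_engine_masked(): extract and clear
 * the lowest set bit of a mask until it is empty.  Names are illustrative.
 */
#include <stdio.h>

static int next_bit(unsigned int *mask)
{
        int bit = __builtin_ctz(*mask);  /* index of lowest set bit */

        *mask &= *mask - 1;              /* clear it */
        return bit;
}

int main(void)
{
        static const char * const names[] = {
                "engine0", "engine1", "engine2", "engine3", "engine4",
        };
        unsigned int tmp, mask = (1u << 0) | (1u << 2) | (1u << 4);

        for (tmp = mask; tmp;) {
                int id = next_bit(&tmp);

                printf("engine %d: %s\n", id, names[id]);
        }
        return 0;
}
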
* @@ -1926,6 +1457,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, } #define IS_MOBILE(dev_priv) (INTEL_INFO(dev_priv)->is_mobile) +#define IS_DGFX(dev_priv) (INTEL_INFO(dev_priv)->is_dgfx) #define IS_I830(dev_priv) IS_PLATFORM(dev_priv, INTEL_I830) #define IS_I845G(dev_priv) IS_PLATFORM(dev_priv, INTEL_I845G) @@ -2060,6 +1592,11 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define IS_ICL_REVID(p, since, until) \ (IS_ICELAKE(p) && IS_REVID(p, since, until)) +#define TGL_REVID_A0 0x0 + +#define IS_TGL_REVID(p, since, until) \ + (IS_TIGERLAKE(p) && IS_REVID(p, since, until)) + #define IS_LP(dev_priv) (INTEL_INFO(dev_priv)->is_lp) #define IS_GEN9_LP(dev_priv) (IS_GEN(dev_priv, 9) && IS_LP(dev_priv)) #define IS_GEN9_BC(dev_priv) (IS_GEN(dev_priv, 9) && !IS_LP(dev_priv)) @@ -2166,6 +1703,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_IPC(dev_priv) (INTEL_INFO(dev_priv)->display.has_ipc) +#define HAS_REGION(i915, i) (INTEL_INFO(i915)->memory_regions & (i)) +#define HAS_LMEM(i915) HAS_REGION(i915, REGION_LMEM) + #define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc) /* Having GuC is not the same as using GuC */ @@ -2189,7 +1729,12 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define GT_FREQUENCY_MULTIPLIER 50 #define GEN9_FREQ_SCALER 3 -#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->num_pipes > 0) +#define INTEL_NUM_PIPES(dev_priv) (hweight8(INTEL_INFO(dev_priv)->pipe_mask)) + +#define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->pipe_mask != 0) + +/* Only valid when HAS_DISPLAY() is true */ +#define INTEL_DISPLAY_ENABLED(dev_priv) (WARN_ON(!HAS_DISPLAY(dev_priv)), !i915_modparams.disable_display) static inline bool intel_vtd_active(void) { @@ -2222,7 +1767,9 @@ extern const struct dev_pm_ops i915_pm_ops; int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent); void i915_driver_remove(struct drm_i915_private *i915); -void intel_engine_init_hangcheck(struct intel_engine_cs *engine); +int i915_resume_switcheroo(struct drm_i915_private *i915); +int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state); + int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on); static inline bool intel_gvt_active(struct drm_i915_private *dev_priv) @@ -2241,12 +1788,13 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, /* i915_gem.c */ int i915_gem_init_userptr(struct drm_i915_private *dev_priv); void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); -void i915_gem_sanitize(struct drm_i915_private *i915); -int i915_gem_init_early(struct drm_i915_private *dev_priv); +void i915_gem_init_early(struct drm_i915_private *dev_priv); void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); int i915_gem_freeze(struct drm_i915_private *dev_priv); int i915_gem_freeze_late(struct drm_i915_private *dev_priv); +struct intel_memory_region *i915_gem_shmem_setup(struct drm_i915_private *i915); + static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) { /* @@ -2331,15 +1879,11 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error, return atomic_read(&error->reset_engine_count[engine->uabi_class]); } -void i915_gem_init_mmio(struct drm_i915_private *i915); int __must_check i915_gem_init(struct drm_i915_private *dev_priv); -int __must_check i915_gem_init_hw(struct drm_i915_private *dev_priv); void i915_gem_driver_register(struct drm_i915_private *i915); void i915_gem_driver_unregister(struct drm_i915_private *i915); void i915_gem_driver_remove(struct 
drm_i915_private *dev_priv); void i915_gem_driver_release(struct drm_i915_private *dev_priv); -int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv, - unsigned int flags, long timeout); void i915_gem_suspend(struct drm_i915_private *dev_priv); void i915_gem_suspend_late(struct drm_i915_private *dev_priv); void i915_gem_resume(struct drm_i915_private *dev_priv); @@ -2379,7 +1923,7 @@ i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id) /* i915_gem_evict.c */ int __must_check i915_gem_evict_something(struct i915_address_space *vm, u64 min_size, u64 alignment, - unsigned cache_level, + unsigned long color, u64 start, u64 end, unsigned flags); int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, @@ -2395,9 +1939,9 @@ i915_gem_object_create_internal(struct drm_i915_private *dev_priv, /* i915_gem_tiling.c */ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj) { - struct drm_i915_private *dev_priv = to_i915(obj->base.dev); + struct drm_i915_private *i915 = to_i915(obj->base.dev); - return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && + return i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 && i915_gem_object_is_tiled(obj); } @@ -2501,4 +2045,10 @@ i915_coherent_map_type(struct drm_i915_private *i915) return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC; } +static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc) +{ + return intel_guc_is_submission_supported(guc) && + intel_guc_is_running(guc); +} + #endif diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 98305d987ac1..b9eb6b3149b7 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -45,13 +45,14 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_pm.h" -#include "gem/i915_gemfs.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_gt_requests.h" #include "gt/intel_mocs.h" #include "gt/intel_reset.h" #include "gt/intel_renderstate.h" +#include "gt/intel_rps.h" #include "gt/intel_workarounds.h" #include "i915_drv.h" @@ -62,20 +63,31 @@ #include "intel_pm.h" static int -insert_mappable_node(struct i915_ggtt *ggtt, - struct drm_mm_node *node, u32 size) +insert_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node, u32 size) { + int err; + + err = mutex_lock_interruptible(&ggtt->vm.mutex); + if (err) + return err; + memset(node, 0, sizeof(*node)); - return drm_mm_insert_node_in_range(&ggtt->vm.mm, node, - size, 0, I915_COLOR_UNEVICTABLE, - 0, ggtt->mappable_end, - DRM_MM_INSERT_LOW); + err = drm_mm_insert_node_in_range(&ggtt->vm.mm, node, + size, 0, I915_COLOR_UNEVICTABLE, + 0, ggtt->mappable_end, + DRM_MM_INSERT_LOW); + + mutex_unlock(&ggtt->vm.mutex); + + return err; } static void -remove_mappable_node(struct drm_mm_node *node) +remove_mappable_node(struct i915_ggtt *ggtt, struct drm_mm_node *node) { + mutex_lock(&ggtt->vm.mutex); drm_mm_remove_node(node); + mutex_unlock(&ggtt->vm.mutex); } int @@ -87,7 +99,8 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, struct i915_vma *vma; u64 pinned; - mutex_lock(&ggtt->vm.mutex); + if (mutex_lock_interruptible(&ggtt->vm.mutex)) + return -EINTR; pinned = ggtt->vm.reserved; list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) @@ -109,20 +122,24 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, LIST_HEAD(still_in_list); int ret = 0; - lockdep_assert_held(&obj->base.dev->struct_mutex); - 
spin_lock(&obj->vma.lock); while (!ret && (vma = list_first_entry_or_null(&obj->vma.list, struct i915_vma, obj_link))) { + struct i915_address_space *vm = vma->vm; + + ret = -EBUSY; + if (!i915_vm_tryopen(vm)) + break; + list_move_tail(&vma->obj_link, &still_in_list); spin_unlock(&obj->vma.lock); - ret = -EBUSY; if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE || !i915_vma_is_active(vma)) ret = i915_vma_unbind(vma); + i915_vm_close(vm); spin_lock(&obj->vma.lock); } list_splice(&still_in_list, &obj->vma.list); @@ -338,10 +355,6 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, u64 remain, offset; int ret; - ret = mutex_lock_interruptible(&i915->drm.struct_mutex); - if (ret) - return ret; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); vma = ERR_PTR(-ENODEV); if (!i915_gem_object_is_tiled(obj)) @@ -351,16 +364,14 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, PIN_NOEVICT); if (!IS_ERR(vma)) { node.start = i915_ggtt_offset(vma); - node.allocated = false; + node.flags = 0; } else { ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); if (ret) - goto out_unlock; - GEM_BUG_ON(!node.allocated); + goto out_rpm; + GEM_BUG_ON(!drm_mm_node_allocated(&node)); } - mutex_unlock(&i915->drm.struct_mutex); - ret = i915_gem_object_lock_interruptible(obj); if (ret) goto out_unpin; @@ -393,7 +404,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, unsigned page_offset = offset_in_page(offset); unsigned page_length = PAGE_SIZE - page_offset; page_length = remain < page_length ? remain : page_length; - if (node.allocated) { + if (drm_mm_node_allocated(&node)) { ggtt->vm.insert_page(&ggtt->vm, i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT), node.start, I915_CACHE_NONE, 0); @@ -414,17 +425,14 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, i915_gem_object_unlock_fence(obj, fence); out_unpin: - mutex_lock(&i915->drm.struct_mutex); - if (node.allocated) { + if (drm_mm_node_allocated(&node)) { ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); - remove_mappable_node(&node); + remove_mappable_node(ggtt, &node); } else { i915_vma_unpin(vma); } -out_unlock: +out_rpm: intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return ret; } @@ -531,10 +539,6 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, void __user *user_data; int ret; - ret = mutex_lock_interruptible(&i915->drm.struct_mutex); - if (ret) - return ret; - if (i915_gem_object_has_struct_page(obj)) { /* * Avoid waking the device up if we can fallback, as @@ -544,10 +548,8 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, * using the cache bypass of indirect GGTT access. 
*/ wakeref = intel_runtime_pm_get_if_in_use(rpm); - if (!wakeref) { - ret = -EFAULT; - goto out_unlock; - } + if (!wakeref) + return -EFAULT; } else { /* No backing pages, no fallback, we must force GGTT access */ wakeref = intel_runtime_pm_get(rpm); @@ -561,16 +563,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, PIN_NOEVICT); if (!IS_ERR(vma)) { node.start = i915_ggtt_offset(vma); - node.allocated = false; + node.flags = 0; } else { ret = insert_mappable_node(ggtt, &node, PAGE_SIZE); if (ret) goto out_rpm; - GEM_BUG_ON(!node.allocated); + GEM_BUG_ON(!drm_mm_node_allocated(&node)); } - mutex_unlock(&i915->drm.struct_mutex); - ret = i915_gem_object_lock_interruptible(obj); if (ret) goto out_unpin; @@ -604,7 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, unsigned int page_offset = offset_in_page(offset); unsigned int page_length = PAGE_SIZE - page_offset; page_length = remain < page_length ? remain : page_length; - if (node.allocated) { + if (drm_mm_node_allocated(&node)) { /* flush the write before we modify the GGTT */ intel_gt_flush_ggtt_writes(ggtt->vm.gt); ggtt->vm.insert_page(&ggtt->vm, @@ -634,18 +634,15 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, i915_gem_object_unlock_fence(obj, fence); out_unpin: - mutex_lock(&i915->drm.struct_mutex); intel_gt_flush_ggtt_writes(ggtt->vm.gt); - if (node.allocated) { + if (drm_mm_node_allocated(&node)) { ggtt->vm.clear_range(&ggtt->vm, node.start, node.size); - remove_mappable_node(&node); + remove_mappable_node(ggtt, &node); } else { i915_vma_unpin(vma); } out_rpm: intel_runtime_pm_put(rpm, wakeref); -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); return ret; } @@ -887,74 +884,6 @@ void i915_gem_runtime_suspend(struct drm_i915_private *i915) } } -static long -wait_for_timelines(struct drm_i915_private *i915, - unsigned int wait, long timeout) -{ - struct intel_gt_timelines *timelines = &i915->gt.timelines; - struct intel_timeline *tl; - unsigned long flags; - - spin_lock_irqsave(&timelines->lock, flags); - list_for_each_entry(tl, &timelines->active_list, link) { - struct i915_request *rq; - - rq = i915_active_request_get_unlocked(&tl->last_request); - if (!rq) - continue; - - spin_unlock_irqrestore(&timelines->lock, flags); - - /* - * "Race-to-idle". - * - * Switching to the kernel context is often used a synchronous - * step prior to idling, e.g. in suspend for flushing all - * current operations to memory before sleeping. These we - * want to complete as quickly as possible to avoid prolonged - * stalls, so allow the gpu to boost to maximum clocks. - */ - if (wait & I915_WAIT_FOR_IDLE_BOOST) - gen6_rps_boost(rq); - - timeout = i915_request_wait(rq, wait, timeout); - i915_request_put(rq); - if (timeout < 0) - return timeout; - - /* restart after reacquiring the lock */ - spin_lock_irqsave(&timelines->lock, flags); - tl = list_entry(&timelines->active_list, typeof(*tl), link); - } - spin_unlock_irqrestore(&timelines->lock, flags); - - return timeout; -} - -int i915_gem_wait_for_idle(struct drm_i915_private *i915, - unsigned int flags, long timeout) -{ - /* If the device is asleep, we have no requests outstanding */ - if (!intel_gt_pm_is_awake(&i915->gt)) - return 0; - - GEM_TRACE("flags=%x (%s), timeout=%ld%s\n", - flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked", - timeout, timeout == MAX_SCHEDULE_TIMEOUT ? 
" (forever)" : ""); - - timeout = wait_for_timelines(i915, flags, timeout); - if (timeout < 0) - return timeout; - - if (flags & I915_WAIT_LOCKED) { - lockdep_assert_held(&i915->drm.struct_mutex); - - i915_retire_requests(i915); - } - - return 0; -} - struct i915_vma * i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj, const struct i915_ggtt_view *view, @@ -981,8 +910,6 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, struct i915_vma *vma; int ret; - lockdep_assert_held(&obj->base.dev->struct_mutex); - if (i915_gem_object_never_bind_ggtt(obj)) return ERR_PTR(-ENODEV); @@ -1032,13 +959,6 @@ i915_gem_object_pin(struct drm_i915_gem_object *obj, return ERR_PTR(-ENOSPC); } - WARN(i915_vma_is_pinned(vma), - "bo is already pinned in ggtt with incorrect alignment:" - " offset=%08x, req.alignment=%llx," - " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n", - i915_ggtt_offset(vma), alignment, - !!(flags & PIN_MAPPABLE), - i915_vma_is_map_and_fenceable(vma)); ret = i915_vma_unbind(vma); if (ret) return ERR_PTR(ret); @@ -1133,128 +1053,7 @@ out: return err; } -void i915_gem_sanitize(struct drm_i915_private *i915) -{ - intel_wakeref_t wakeref; - - GEM_TRACE("\n"); - - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); - - /* - * As we have just resumed the machine and woken the device up from - * deep PCI sleep (presumably D3_cold), assume the HW has been reset - * back to defaults, recovering from whatever wedged state we left it - * in and so worth trying to use the device once more. - */ - if (intel_gt_is_wedged(&i915->gt)) - intel_gt_unset_wedged(&i915->gt); - - /* - * If we inherit context state from the BIOS or earlier occupants - * of the GPU, the GPU may be in an inconsistent state when we - * try to take over. The only way to remove the earlier state - * is by resetting. However, resetting on earlier gen is tricky as - * it may impact the display and we are uncertain about the stability - * of the reset, so this could be applied to even earlier gen. 
- */ - intel_gt_sanitize(&i915->gt, false); - - intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); -} - -static void init_unused_ring(struct intel_gt *gt, u32 base) -{ - struct intel_uncore *uncore = gt->uncore; - - intel_uncore_write(uncore, RING_CTL(base), 0); - intel_uncore_write(uncore, RING_HEAD(base), 0); - intel_uncore_write(uncore, RING_TAIL(base), 0); - intel_uncore_write(uncore, RING_START(base), 0); -} - -static void init_unused_rings(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - - if (IS_I830(i915)) { - init_unused_ring(gt, PRB1_BASE); - init_unused_ring(gt, SRB0_BASE); - init_unused_ring(gt, SRB1_BASE); - init_unused_ring(gt, SRB2_BASE); - init_unused_ring(gt, SRB3_BASE); - } else if (IS_GEN(i915, 2)) { - init_unused_ring(gt, SRB0_BASE); - init_unused_ring(gt, SRB1_BASE); - } else if (IS_GEN(i915, 3)) { - init_unused_ring(gt, PRB1_BASE); - init_unused_ring(gt, PRB2_BASE); - } -} - -int i915_gem_init_hw(struct drm_i915_private *i915) -{ - struct intel_uncore *uncore = &i915->uncore; - struct intel_gt *gt = &i915->gt; - int ret; - - BUG_ON(!i915->kernel_context); - ret = intel_gt_terminally_wedged(gt); - if (ret) - return ret; - - gt->last_init_time = ktime_get(); - - /* Double layer security blanket, see i915_gem_init() */ - intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL); - - if (HAS_EDRAM(i915) && INTEL_GEN(i915) < 9) - intel_uncore_rmw(uncore, HSW_IDICR, 0, IDIHASHMSK(0xf)); - - if (IS_HASWELL(i915)) - intel_uncore_write(uncore, - MI_PREDICATE_RESULT_2, - IS_HSW_GT3(i915) ? - LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED); - - /* Apply the GT workarounds... */ - intel_gt_apply_workarounds(gt); - /* ...and determine whether they are sticking. */ - intel_gt_verify_workarounds(gt, "init"); - - intel_gt_init_swizzling(gt); - - /* - * At least 830 can leave some of the unused rings - * "active" (ie. head != tail) after resume which - * will prevent c3 entry. Makes sure all unused rings - * are totally idle. - */ - init_unused_rings(gt); - - ret = i915_ppgtt_init_hw(gt); - if (ret) { - DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); - goto out; - } - - /* We can't enable contexts until all firmware is loaded */ - ret = intel_uc_init_hw(>->uc); - if (ret) { - i915_probe_error(i915, "Enabling uc failed (%d)\n", ret); - goto out; - } - - intel_mocs_init(gt); - -out: - intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); - return ret; -} - -static int __intel_engines_record_defaults(struct drm_i915_private *i915) +static int __intel_engines_record_defaults(struct intel_gt *gt) { struct i915_request *requests[I915_NUM_ENGINES] = {}; struct intel_engine_cs *engine; @@ -1270,7 +1069,7 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) * from the same default HW values. */ - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct intel_context *ce; struct i915_request *rq; @@ -1278,7 +1077,8 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) GEM_BUG_ON(!engine->kernel_context); engine->serial++; /* force the kernel context switch */ - ce = intel_context_create(i915->kernel_context, engine); + ce = intel_context_create(engine->kernel_context->gem_context, + engine); if (IS_ERR(ce)) { err = PTR_ERR(ce); goto out; @@ -1295,15 +1095,6 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915) if (err) goto err_rq; - /* - * Failing to program the MOCS is non-fatal.The system will not - * run at peak performance. 
So warn the user and carry on. - */ - err = intel_mocs_emit(rq); - if (err) - dev_notice(i915->drm.dev, - "Failed to program MOCS registers; expect performance issues.\n"); - err = intel_renderstate_emit(rq); if (err) goto err_rq; @@ -1316,7 +1107,7 @@ err_rq: } /* Flush the default context image to memory, and enable powersaving. */ - if (!i915_gem_load_power_context(i915)) { + if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) { err = -EIO; goto out; } @@ -1375,7 +1166,7 @@ out: * this is by declaring ourselves wedged. */ if (err) - intel_gt_set_wedged(&i915->gt); + intel_gt_set_wedged(gt); for (id = 0; id < ARRAY_SIZE(requests); id++) { struct intel_context *ce; @@ -1392,18 +1183,7 @@ out: return err; } -static int -i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size) -{ - return intel_gt_init_scratch(&i915->gt, size); -} - -static void i915_gem_fini_scratch(struct drm_i915_private *i915) -{ - intel_gt_fini_scratch(&i915->gt); -} - -static int intel_engines_verify_workarounds(struct drm_i915_private *i915) +static int intel_engines_verify_workarounds(struct intel_gt *gt) { struct intel_engine_cs *engine; enum intel_engine_id id; @@ -1412,7 +1192,7 @@ static int intel_engines_verify_workarounds(struct drm_i915_private *i915) if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) return 0; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { if (intel_engine_verify_workarounds(engine, "load")) err = -EIO; } @@ -1444,7 +1224,6 @@ int i915_gem_init(struct drm_i915_private *dev_priv) * we hold the forcewake during initialisation these problems * just magically go away. */ - mutex_lock(&dev_priv->drm.struct_mutex); intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); ret = i915_init_ggtt(dev_priv); @@ -1453,36 +1232,29 @@ int i915_gem_init(struct drm_i915_private *dev_priv) goto err_unlock; } - ret = i915_gem_init_scratch(dev_priv, - IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE); - if (ret) { - GEM_BUG_ON(ret == -EIO); - goto err_ggtt; - } + intel_gt_init(&dev_priv->gt); - ret = intel_engines_setup(dev_priv); + ret = intel_engines_setup(&dev_priv->gt); if (ret) { GEM_BUG_ON(ret == -EIO); goto err_unlock; } - ret = i915_gem_contexts_init(dev_priv); + ret = i915_gem_init_contexts(dev_priv); if (ret) { GEM_BUG_ON(ret == -EIO); goto err_scratch; } - ret = intel_engines_init(dev_priv); + ret = intel_engines_init(&dev_priv->gt); if (ret) { GEM_BUG_ON(ret == -EIO); goto err_context; } - intel_init_gt_powersave(dev_priv); - intel_uc_init(&dev_priv->gt.uc); - ret = i915_gem_init_hw(dev_priv); + ret = intel_gt_init_hw(&dev_priv->gt); if (ret) goto err_uc_init; @@ -1502,24 +1274,23 @@ int i915_gem_init(struct drm_i915_private *dev_priv) */ intel_init_clock_gating(dev_priv); - ret = intel_engines_verify_workarounds(dev_priv); + ret = intel_engines_verify_workarounds(&dev_priv->gt); if (ret) goto err_gt; - ret = __intel_engines_record_defaults(dev_priv); + ret = __intel_engines_record_defaults(&dev_priv->gt); if (ret) goto err_gt; - ret = i915_inject_load_error(dev_priv, -ENODEV); + ret = i915_inject_probe_error(dev_priv, -ENODEV); if (ret) goto err_gt; - ret = i915_inject_load_error(dev_priv, -EIO); + ret = i915_inject_probe_error(dev_priv, -EIO); if (ret) goto err_gt; intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - mutex_unlock(&dev_priv->drm.struct_mutex); return 0; @@ -1530,32 +1301,25 @@ int i915_gem_init(struct drm_i915_private *dev_priv) * driver doesn't explode during runtime. 
*/ err_gt: - mutex_unlock(&dev_priv->drm.struct_mutex); - - intel_gt_set_wedged(&dev_priv->gt); + intel_gt_set_wedged_on_init(&dev_priv->gt); i915_gem_suspend(dev_priv); i915_gem_suspend_late(dev_priv); i915_gem_drain_workqueue(dev_priv); - - mutex_lock(&dev_priv->drm.struct_mutex); err_init_hw: intel_uc_fini_hw(&dev_priv->gt.uc); err_uc_init: if (ret != -EIO) { intel_uc_fini(&dev_priv->gt.uc); - intel_cleanup_gt_powersave(dev_priv); - intel_engines_cleanup(dev_priv); + intel_engines_cleanup(&dev_priv->gt); } err_context: if (ret != -EIO) - i915_gem_contexts_fini(dev_priv); + i915_gem_driver_release__contexts(dev_priv); err_scratch: - i915_gem_fini_scratch(dev_priv); -err_ggtt: + intel_gt_driver_release(&dev_priv->gt); err_unlock: intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - mutex_unlock(&dev_priv->drm.struct_mutex); if (ret != -EIO) { intel_uc_cleanup_firmwares(&dev_priv->gt.uc); @@ -1564,8 +1328,6 @@ err_unlock: } if (ret == -EIO) { - mutex_lock(&dev_priv->drm.struct_mutex); - /* * Allow engines or uC initialisation to fail by marking the GPU * as wedged. But we only want to do this when the GPU is angry, @@ -1580,10 +1342,8 @@ err_unlock: /* Minimal basic recovery for KMS */ ret = i915_ggtt_enable_hw(dev_priv); i915_gem_restore_gtt_mappings(dev_priv); - i915_gem_restore_fences(dev_priv); + i915_gem_restore_fences(&dev_priv->ggtt); intel_init_clock_gating(dev_priv); - - mutex_unlock(&dev_priv->drm.struct_mutex); } i915_gem_drain_freed_objects(dev_priv); @@ -1604,48 +1364,35 @@ void i915_gem_driver_unregister(struct drm_i915_private *i915) void i915_gem_driver_remove(struct drm_i915_private *dev_priv) { - GEM_BUG_ON(dev_priv->gt.awake); - intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref); i915_gem_suspend_late(dev_priv); - intel_disable_gt_powersave(dev_priv); + intel_gt_driver_remove(&dev_priv->gt); /* Flush any outstanding unpin_work. 
*/ i915_gem_drain_workqueue(dev_priv); - mutex_lock(&dev_priv->drm.struct_mutex); intel_uc_fini_hw(&dev_priv->gt.uc); intel_uc_fini(&dev_priv->gt.uc); - mutex_unlock(&dev_priv->drm.struct_mutex); i915_gem_drain_freed_objects(dev_priv); } void i915_gem_driver_release(struct drm_i915_private *dev_priv) { - mutex_lock(&dev_priv->drm.struct_mutex); - intel_engines_cleanup(dev_priv); - i915_gem_contexts_fini(dev_priv); - i915_gem_fini_scratch(dev_priv); - mutex_unlock(&dev_priv->drm.struct_mutex); + intel_engines_cleanup(&dev_priv->gt); + i915_gem_driver_release__contexts(dev_priv); + intel_gt_driver_release(&dev_priv->gt); intel_wa_list_free(&dev_priv->gt_wa_list); - intel_cleanup_gt_powersave(dev_priv); - intel_uc_cleanup_firmwares(&dev_priv->gt.uc); i915_gem_cleanup_userptr(dev_priv); intel_timelines_fini(dev_priv); i915_gem_drain_freed_objects(dev_priv); - WARN_ON(!list_empty(&dev_priv->contexts.list)); -} - -void i915_gem_init_mmio(struct drm_i915_private *i915) -{ - i915_gem_sanitize(i915); + WARN_ON(!list_empty(&dev_priv->gem.contexts.list)); } static void i915_gem_init__mm(struct drm_i915_private *i915) @@ -1660,20 +1407,11 @@ static void i915_gem_init__mm(struct drm_i915_private *i915) i915_gem_init__objects(i915); } -int i915_gem_init_early(struct drm_i915_private *dev_priv) +void i915_gem_init_early(struct drm_i915_private *dev_priv) { - int err; - i915_gem_init__mm(dev_priv); - i915_gem_init__pm(dev_priv); spin_lock_init(&dev_priv->fb_tracking.lock); - - err = i915_gemfs_init(dev_priv); - if (err) - DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err); - - return 0; } void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) @@ -1682,8 +1420,6 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv) GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list)); GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); WARN_ON(dev_priv->mm.shrink_count); - - i915_gemfs_fini(dev_priv); } int i915_gem_freeze(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_gem.h b/drivers/gpu/drm/i915/i915_gem.h index 6795f1daa3d5..f6f9675848b8 100644 --- a/drivers/gpu/drm/i915/i915_gem.h +++ b/drivers/gpu/drm/i915/i915_gem.h @@ -37,10 +37,8 @@ struct drm_i915_private; #define GEM_SHOW_DEBUG() (drm_debug & DRM_UT_DRIVER) #define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \ - pr_err("%s:%d GEM_BUG_ON(%s)\n", \ - __func__, __LINE__, __stringify(condition)); \ - GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \ - __func__, __LINE__, __stringify(condition)); \ + GEM_TRACE_ERR("%s:%d GEM_BUG_ON(%s)\n", \ + __func__, __LINE__, __stringify(condition)); \ BUG(); \ } \ } while(0) @@ -66,11 +64,16 @@ struct drm_i915_private; #if IS_ENABLED(CONFIG_DRM_I915_TRACE_GEM) #define GEM_TRACE(...) trace_printk(__VA_ARGS__) +#define GEM_TRACE_ERR(...) do { \ + pr_err(__VA_ARGS__); \ + trace_printk(__VA_ARGS__); \ +} while (0) #define GEM_TRACE_DUMP() ftrace_dump(DUMP_ALL) #define GEM_TRACE_DUMP_ON(expr) \ do { if (expr) ftrace_dump(DUMP_ALL); } while (0) #else #define GEM_TRACE(...) do { } while (0) +#define GEM_TRACE_ERR(...) 
do { } while (0) #define GEM_TRACE_DUMP() do { } while (0) #define GEM_TRACE_DUMP_ON(expr) BUILD_BUG_ON_INVALID(expr) #endif @@ -83,6 +86,11 @@ static inline void tasklet_lock(struct tasklet_struct *t) cpu_relax(); } +static inline bool tasklet_is_locked(const struct tasklet_struct *t) +{ + return test_bit(TASKLET_STATE_RUN, &t->state); +} + static inline void __tasklet_disable_sync_once(struct tasklet_struct *t) { if (!atomic_fetch_inc(&t->count)) diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 52c86c6e0673..7e62c310290f 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -29,6 +29,7 @@ #include <drm/i915_drm.h> #include "gem/i915_gem_context.h" +#include "gt/intel_gt_requests.h" #include "i915_drv.h" #include "i915_trace.h" @@ -37,7 +38,7 @@ I915_SELFTEST_DECLARE(static struct igt_evict_ctl { bool fail_if_busy:1; } igt_evict_ctl;) -static int ggtt_flush(struct drm_i915_private *i915) +static int ggtt_flush(struct intel_gt *gt) { /* * Not everything in the GGTT is tracked via vma (otherwise we @@ -46,10 +47,7 @@ static int ggtt_flush(struct drm_i915_private *i915) * the hopes that we can then remove contexts and the like only * bound by their active reference. */ - return i915_gem_wait_for_idle(i915, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); + return intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); } static bool @@ -70,7 +68,7 @@ mark_free(struct drm_mm_scan *scan, * @vm: address space to evict from * @min_size: size of the desired free space * @alignment: alignment constraint of the desired free space - * @cache_level: cache_level for the desired space + * @color: color for the desired space * @start: start (inclusive) of the range from which to evict objects * @end: end (exclusive) of the range from which to evict objects * @flags: additional flags to control the eviction algorithm @@ -91,11 +89,10 @@ mark_free(struct drm_mm_scan *scan, int i915_gem_evict_something(struct i915_address_space *vm, u64 min_size, u64 alignment, - unsigned cache_level, + unsigned long color, u64 start, u64 end, unsigned flags) { - struct drm_i915_private *dev_priv = vm->i915; struct drm_mm_scan scan; struct list_head eviction_list; struct i915_vma *vma, *next; @@ -104,7 +101,7 @@ i915_gem_evict_something(struct i915_address_space *vm, struct i915_vma *active; int ret; - lockdep_assert_held(&vm->i915->drm.struct_mutex); + lockdep_assert_held(&vm->mutex); trace_i915_gem_evict(vm, min_size, alignment, flags); /* @@ -124,17 +121,10 @@ i915_gem_evict_something(struct i915_address_space *vm, if (flags & PIN_MAPPABLE) mode = DRM_MM_INSERT_LOW; drm_mm_scan_init_with_range(&scan, &vm->mm, - min_size, alignment, cache_level, + min_size, alignment, color, start, end, mode); - /* - * Retire before we search the active list. Although we have - * reasonable accuracy in our retirement lists, we may have - * a stray pin (preventing eviction) that can only be resolved by - * retiring. 
- */ - if (!(flags & PIN_NONBLOCK)) - i915_retire_requests(dev_priv); + intel_gt_retire_requests(vm->gt); search_again: active = NULL; @@ -207,7 +197,7 @@ search_again: if (I915_SELFTEST_ONLY(igt_evict_ctl.fail_if_busy)) return -EBUSY; - ret = ggtt_flush(dev_priv); + ret = ggtt_flush(vm->gt); if (ret) return ret; @@ -235,12 +225,12 @@ found: list_for_each_entry_safe(vma, next, &eviction_list, evict_link) { __i915_vma_unpin(vma); if (ret == 0) - ret = i915_vma_unbind(vma); + ret = __i915_vma_unbind(vma); } while (ret == 0 && (node = drm_mm_scan_color_evict(&scan))) { vma = container_of(node, struct i915_vma, node); - ret = i915_vma_unbind(vma); + ret = __i915_vma_unbind(vma); } return ret; @@ -266,25 +256,23 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, u64 start = target->start; u64 end = start + target->size; struct i915_vma *vma, *next; - bool check_color; int ret = 0; - lockdep_assert_held(&vm->i915->drm.struct_mutex); + lockdep_assert_held(&vm->mutex); GEM_BUG_ON(!IS_ALIGNED(start, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); trace_i915_gem_evict_node(vm, target, flags); - /* Retire before we search the active list. Although we have + /* + * Retire before we search the active list. Although we have * reasonable accuracy in our retirement lists, we may have * a stray pin (preventing eviction) that can only be resolved by * retiring. */ - if (!(flags & PIN_NONBLOCK)) - i915_retire_requests(vm->i915); + intel_gt_retire_requests(vm->gt); - check_color = vm->mm.color_adjust; - if (check_color) { + if (i915_vm_has_cache_coloring(vm)) { /* Expand search to cover neighbouring guard pages (or lack!) */ if (start) start -= I915_GTT_PAGE_SIZE; @@ -301,7 +289,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, break; } - GEM_BUG_ON(!node->allocated); + GEM_BUG_ON(!drm_mm_node_allocated(node)); vma = container_of(node, typeof(*vma), node); /* If we are using coloring to insert guard pages between @@ -310,7 +298,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, * abutt and conflict. If they are in conflict, then we evict * those as well to make room for our guard pages. */ - if (check_color) { + if (i915_vm_has_cache_coloring(vm)) { if (node->start + node->size == target->start) { if (node->color == target->color) continue; @@ -351,7 +339,7 @@ int i915_gem_evict_for_node(struct i915_address_space *vm, list_for_each_entry_safe(vma, next, &eviction_list, evict_link) { __i915_vma_unpin(vma); if (ret == 0) - ret = i915_vma_unbind(vma); + ret = __i915_vma_unbind(vma); } return ret; @@ -375,7 +363,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm) struct i915_vma *vma, *next; int ret; - lockdep_assert_held(&vm->i915->drm.struct_mutex); + lockdep_assert_held(&vm->mutex); trace_i915_gem_evict_vm(vm); /* Switch back to the default context in order to unpin @@ -384,13 +372,12 @@ int i915_gem_evict_vm(struct i915_address_space *vm) * switch otherwise is ineffective. 
*/ if (i915_is_ggtt(vm)) { - ret = ggtt_flush(vm->i915); + ret = ggtt_flush(vm->gt); if (ret) return ret; } INIT_LIST_HEAD(&eviction_list); - mutex_lock(&vm->mutex); list_for_each_entry(vma, &vm->bound_list, vm_link) { if (i915_vma_is_pinned(vma)) continue; @@ -398,13 +385,12 @@ int i915_gem_evict_vm(struct i915_address_space *vm) __i915_vma_pin(vma); list_add(&vma->evict_link, &eviction_list); } - mutex_unlock(&vm->mutex); ret = 0; list_for_each_entry_safe(vma, next, &eviction_list, evict_link) { __i915_vma_unpin(vma); if (ret == 0) - ret = i915_vma_unbind(vma); + ret = __i915_vma_unbind(vma); } return ret; } diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index 615a9f4ef30c..71efccfde122 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -59,6 +59,16 @@ #define pipelined 0 +static struct drm_i915_private *fence_to_i915(struct i915_fence_reg *fence) +{ + return fence->ggtt->vm.i915; +} + +static struct intel_uncore *fence_to_uncore(struct i915_fence_reg *fence) +{ + return fence->ggtt->vm.gt->uncore; +} + static void i965_write_fence_reg(struct i915_fence_reg *fence, struct i915_vma *vma) { @@ -66,7 +76,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence, int fence_pitch_shift; u64 val; - if (INTEL_GEN(fence->i915) >= 6) { + if (INTEL_GEN(fence_to_i915(fence)) >= 6) { fence_reg_lo = FENCE_REG_GEN6_LO(fence->id); fence_reg_hi = FENCE_REG_GEN6_HI(fence->id); fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT; @@ -95,7 +105,7 @@ static void i965_write_fence_reg(struct i915_fence_reg *fence, } if (!pipelined) { - struct intel_uncore *uncore = &fence->i915->uncore; + struct intel_uncore *uncore = fence_to_uncore(fence); /* * To w/a incoherency with non-atomic 64-bit register updates, @@ -132,7 +142,7 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence, GEM_BUG_ON(!is_power_of_2(vma->fence_size)); GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size)); - if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915)) + if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence_to_i915(fence))) stride /= 128; else stride /= 512; @@ -148,7 +158,7 @@ static void i915_write_fence_reg(struct i915_fence_reg *fence, } if (!pipelined) { - struct intel_uncore *uncore = &fence->i915->uncore; + struct intel_uncore *uncore = fence_to_uncore(fence); i915_reg_t reg = FENCE_REG(fence->id); intel_uncore_write_fw(uncore, reg, val); @@ -180,7 +190,7 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence, } if (!pipelined) { - struct intel_uncore *uncore = &fence->i915->uncore; + struct intel_uncore *uncore = fence_to_uncore(fence); i915_reg_t reg = FENCE_REG(fence->id); intel_uncore_write_fw(uncore, reg, val); @@ -191,15 +201,17 @@ static void i830_write_fence_reg(struct i915_fence_reg *fence, static void fence_write(struct i915_fence_reg *fence, struct i915_vma *vma) { + struct drm_i915_private *i915 = fence_to_i915(fence); + /* * Previous access through the fence register is marshalled by * the mb() inside the fault handlers (i915_gem_release_mmaps) * and explicitly managed for internal users. 
*/ - if (IS_GEN(fence->i915, 2)) + if (IS_GEN(i915, 2)) i830_write_fence_reg(fence, vma); - else if (IS_GEN(fence->i915, 3)) + else if (IS_GEN(i915, 3)) i915_write_fence_reg(fence, vma); else i965_write_fence_reg(fence, vma); @@ -215,6 +227,8 @@ static void fence_write(struct i915_fence_reg *fence, static int fence_update(struct i915_fence_reg *fence, struct i915_vma *vma) { + struct i915_ggtt *ggtt = fence->ggtt; + struct intel_uncore *uncore = fence_to_uncore(fence); intel_wakeref_t wakeref; struct i915_vma *old; int ret; @@ -230,14 +244,15 @@ static int fence_update(struct i915_fence_reg *fence, i915_gem_object_get_tiling(vma->obj))) return -EINVAL; - ret = i915_active_wait(&vma->active); + ret = i915_vma_sync(vma); if (ret) return ret; } old = xchg(&fence->vma, NULL); if (old) { - ret = i915_active_wait(&old->active); + /* XXX Ideally we would move the waiting to outside the mutex */ + ret = i915_vma_sync(old); if (ret) { fence->vma = old; return ret; @@ -255,7 +270,7 @@ static int fence_update(struct i915_fence_reg *fence, old->fence = NULL; } - list_move(&fence->link, &fence->i915->ggtt.fence_list); + list_move(&fence->link, &ggtt->fence_list); } /* @@ -268,7 +283,7 @@ static int fence_update(struct i915_fence_reg *fence, * be cleared before we can use any other fences to ensure that * the new fences do not overlap the elided clears, confusing HW. */ - wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm); + wakeref = intel_runtime_pm_get_if_in_use(uncore->rpm); if (!wakeref) { GEM_BUG_ON(vma); return 0; @@ -279,10 +294,10 @@ static int fence_update(struct i915_fence_reg *fence, if (vma) { vma->fence = fence; - list_move_tail(&fence->link, &fence->i915->ggtt.fence_list); + list_move_tail(&fence->link, &ggtt->fence_list); } - intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref); + intel_runtime_pm_put(uncore->rpm, wakeref); return 0; } @@ -311,11 +326,11 @@ int i915_vma_revoke_fence(struct i915_vma *vma) return fence_update(fence, NULL); } -static struct i915_fence_reg *fence_find(struct drm_i915_private *i915) +static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt) { struct i915_fence_reg *fence; - list_for_each_entry(fence, &i915->ggtt.fence_list, link) { + list_for_each_entry(fence, &ggtt->fence_list, link) { GEM_BUG_ON(fence->vma && fence->vma->fence != fence); if (atomic_read(&fence->pin_count)) @@ -325,19 +340,21 @@ static struct i915_fence_reg *fence_find(struct drm_i915_private *i915) } /* Wait for completion of pending flips which consume fences */ - if (intel_has_pending_fb_unpin(i915)) + if (intel_has_pending_fb_unpin(ggtt->vm.i915)) return ERR_PTR(-EAGAIN); return ERR_PTR(-EDEADLK); } -static int __i915_vma_pin_fence(struct i915_vma *vma) +int __i915_vma_pin_fence(struct i915_vma *vma) { struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm); struct i915_fence_reg *fence; struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL; int err; + lockdep_assert_held(&vma->vm->mutex); + /* Just update our place in the LRU if our fence is getting reused. */ if (vma->fence) { fence = vma->fence; @@ -348,7 +365,7 @@ static int __i915_vma_pin_fence(struct i915_vma *vma) return 0; } } else if (set) { - fence = fence_find(vma->vm->i915); + fence = fence_find(ggtt); if (IS_ERR(fence)) return PTR_ERR(fence); @@ -399,7 +416,7 @@ int i915_vma_pin_fence(struct i915_vma *vma) * Note that we revoke fences on runtime suspend. Therefore the user * must keep the device awake whilst using the fence. 
*/ - assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm); + assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm); GEM_BUG_ON(!i915_vma_is_pinned(vma)); GEM_BUG_ON(!i915_vma_is_ggtt(vma)); @@ -415,14 +432,13 @@ int i915_vma_pin_fence(struct i915_vma *vma) /** * i915_reserve_fence - Reserve a fence for vGPU - * @i915: i915 device private + * @ggtt: Global GTT * * This function walks the fence regs looking for a free one and remove * it from the fence_list. It is used to reserve fence for vGPU to use. */ -struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915) +struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; struct i915_fence_reg *fence; int count; int ret; @@ -436,7 +452,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915) if (count <= 1) return ERR_PTR(-ENOSPC); - fence = fence_find(i915); + fence = fence_find(ggtt); if (IS_ERR(fence)) return fence; @@ -460,7 +476,7 @@ struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915) */ void i915_unreserve_fence(struct i915_fence_reg *fence) { - struct i915_ggtt *ggtt = &fence->i915->ggtt; + struct i915_ggtt *ggtt = fence->ggtt; lockdep_assert_held(&ggtt->vm.mutex); @@ -469,19 +485,19 @@ void i915_unreserve_fence(struct i915_fence_reg *fence) /** * i915_gem_restore_fences - restore fence state - * @i915: i915 device private + * @ggtt: Global GTT * * Restore the hw fence state to match the software tracking again, to be called * after a gpu reset and on resume. Note that on runtime suspend we only cancel * the fences, to be reacquired by the user later. */ -void i915_gem_restore_fences(struct drm_i915_private *i915) +void i915_gem_restore_fences(struct i915_ggtt *ggtt) { int i; rcu_read_lock(); /* keep obj alive as we dereference */ - for (i = 0; i < i915->ggtt.num_fences; i++) { - struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i]; + for (i = 0; i < ggtt->num_fences; i++) { + struct i915_fence_reg *reg = &ggtt->fence_regs[i]; struct i915_vma *vma = READ_ONCE(reg->vma); GEM_BUG_ON(vma && vma->fence != reg); @@ -547,15 +563,16 @@ void i915_gem_restore_fences(struct drm_i915_private *i915) */ /** - * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern - * @i915: i915 device private + * detect_bit_6_swizzle - detect bit 6 swizzling pattern + * @ggtt: Global GGTT * * Detects bit 6 swizzling of address lookup between IGD access and CPU * access through main memory. 
*/ -static void detect_bit_6_swizzle(struct drm_i915_private *i915) +static void detect_bit_6_swizzle(struct i915_ggtt *ggtt) { - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = ggtt->vm.gt->uncore; + struct drm_i915_private *i915 = ggtt->vm.i915; u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; @@ -717,8 +734,8 @@ static void detect_bit_6_swizzle(struct drm_i915_private *i915) swizzle_y = I915_BIT_6_SWIZZLE_NONE; } - i915->mm.bit_6_swizzle_x = swizzle_x; - i915->mm.bit_6_swizzle_y = swizzle_y; + i915->ggtt.bit_6_swizzle_x = swizzle_x; + i915->ggtt.bit_6_swizzle_y = swizzle_y; } /* @@ -819,17 +836,20 @@ i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj, void i915_ggtt_init_fences(struct i915_ggtt *ggtt) { struct drm_i915_private *i915 = ggtt->vm.i915; + struct intel_uncore *uncore = ggtt->vm.gt->uncore; int num_fences; int i; INIT_LIST_HEAD(&ggtt->fence_list); INIT_LIST_HEAD(&ggtt->userfault_list); - intel_wakeref_auto_init(&ggtt->userfault_wakeref, &i915->runtime_pm); + intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm); - detect_bit_6_swizzle(i915); + detect_bit_6_swizzle(ggtt); - if (INTEL_GEN(i915) >= 7 && - !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))) + if (!i915_ggtt_has_aperture(ggtt)) + num_fences = 0; + else if (INTEL_GEN(i915) >= 7 && + !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))) num_fences = 32; else if (INTEL_GEN(i915) >= 4 || IS_I945G(i915) || IS_I945GM(i915) || @@ -839,20 +859,20 @@ void i915_ggtt_init_fences(struct i915_ggtt *ggtt) num_fences = 8; if (intel_vgpu_active(i915)) - num_fences = intel_uncore_read(&i915->uncore, + num_fences = intel_uncore_read(uncore, vgtif_reg(avail_rs.fence_num)); /* Initialize fence registers to zero */ for (i = 0; i < num_fences; i++) { struct i915_fence_reg *fence = &ggtt->fence_regs[i]; - fence->i915 = i915; + fence->ggtt = ggtt; fence->id = i; list_add_tail(&fence->link, &ggtt->fence_list); } ggtt->num_fences = num_fences; - i915_gem_restore_fences(i915); + i915_gem_restore_fences(ggtt); } void intel_gt_init_swizzling(struct intel_gt *gt) @@ -861,7 +881,7 @@ void intel_gt_init_swizzling(struct intel_gt *gt) struct intel_uncore *uncore = gt->uncore; if (INTEL_GEN(i915) < 5 || - i915->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) + i915->ggtt.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE) return; intel_uncore_rmw(uncore, DISP_ARB_CTL, 0, DISP_TILE_SURFACE_SWIZZLING); diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.h b/drivers/gpu/drm/i915/i915_gem_fence_reg.h index 99866fb9d94f..7bd521cd7cd7 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.h +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.h @@ -29,7 +29,6 @@ #include <linux/types.h> struct drm_i915_gem_object; -struct drm_i915_private; struct i915_ggtt; struct i915_vma; struct intel_gt; @@ -39,7 +38,7 @@ struct sg_table; struct i915_fence_reg { struct list_head link; - struct drm_i915_private *i915; + struct i915_ggtt *ggtt; struct i915_vma *vma; atomic_t pin_count; int id; @@ -55,10 +54,10 @@ struct i915_fence_reg { }; /* i915_gem_fence_reg.c */ -struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915); +struct i915_fence_reg *i915_reserve_fence(struct i915_ggtt *ggtt); void i915_unreserve_fence(struct i915_fence_reg *fence); -void i915_gem_restore_fences(struct drm_i915_private *i915); +void i915_gem_restore_fences(struct i915_ggtt *ggtt); void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj, struct sg_table *pages); diff --git 
a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index b1a7a8b9b46a..6239a9adbf14 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -38,6 +38,7 @@ #include "display/intel_frontbuffer.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_requests.h" #include "i915_drv.h" #include "i915_scatterlist.h" @@ -132,9 +133,15 @@ static void gen6_ggtt_invalidate(struct i915_ggtt *ggtt) static void guc_ggtt_invalidate(struct i915_ggtt *ggtt) { struct intel_uncore *uncore = ggtt->vm.gt->uncore; + struct drm_i915_private *i915 = ggtt->vm.i915; gen6_ggtt_invalidate(ggtt); - intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); + + if (INTEL_GEN(i915) >= 12) + intel_uncore_write_fw(uncore, GEN12_GUC_TLB_INV_CR, + GEN12_GUC_TLB_INV_CR_INVALIDATE); + else + intel_uncore_write_fw(uncore, GEN8_GTCR, GEN8_GTCR_INVALIDATE); } static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) @@ -144,16 +151,18 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt) static int ppgtt_bind_vma(struct i915_vma *vma, enum i915_cache_level cache_level, - u32 unused) + u32 flags) { u32 pte_flags; int err; - if (!(vma->flags & I915_VMA_LOCAL_BIND)) { + if (flags & I915_VMA_ALLOC) { err = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size); if (err) return err; + + set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)); } /* Applicable to VLV, and gen8+ */ @@ -161,14 +170,17 @@ static int ppgtt_bind_vma(struct i915_vma *vma, if (i915_gem_object_is_readonly(vma->obj)) pte_flags |= PTE_READ_ONLY; + GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))); vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags); + wmb(); return 0; } static void ppgtt_unbind_vma(struct i915_vma *vma) { - vma->vm->clear_range(vma->vm, vma->node.start, vma->size); + if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) + vma->vm->clear_range(vma->vm, vma->node.start, vma->size); } static int ppgtt_set_pages(struct i915_vma *vma) @@ -496,22 +508,26 @@ static void i915_address_space_fini(struct i915_address_space *vm) mutex_destroy(&vm->mutex); } -static void ppgtt_destroy_vma(struct i915_address_space *vm) +void __i915_vm_close(struct i915_address_space *vm) { - struct list_head *phases[] = { - &vm->bound_list, - &vm->unbound_list, - NULL, - }, **phase; + struct i915_vma *vma, *vn; + + mutex_lock(&vm->mutex); + list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) { + struct drm_i915_gem_object *obj = vma->obj; - mutex_lock(&vm->i915->drm.struct_mutex); - for (phase = phases; *phase; phase++) { - struct i915_vma *vma, *vn; + /* Keep the obj (and hence the vma) alive as _we_ destroy it */ + if (!kref_get_unless_zero(&obj->base.refcount)) + continue; + + atomic_and(~I915_VMA_PIN_MASK, &vma->flags); + WARN_ON(__i915_vma_unbind(vma)); + i915_vma_destroy(vma); - list_for_each_entry_safe(vma, vn, *phase, vm_link) - i915_vma_destroy(vma); + i915_gem_object_put(obj); } - mutex_unlock(&vm->i915->drm.struct_mutex); + GEM_BUG_ON(!list_empty(&vm->bound_list)); + mutex_unlock(&vm->mutex); } static void __i915_vm_release(struct work_struct *work) @@ -519,11 +535,6 @@ static void __i915_vm_release(struct work_struct *work) struct i915_address_space *vm = container_of(work, struct i915_address_space, rcu.work); - ppgtt_destroy_vma(vm); - - GEM_BUG_ON(!list_empty(&vm->bound_list)); - GEM_BUG_ON(!list_empty(&vm->unbound_list)); - vm->cleanup(vm); i915_address_space_fini(vm); @@ -538,7 +549,6 @@ void i915_vm_release(struct kref *kref) 
GEM_BUG_ON(i915_is_ggtt(vm)); trace_i915_ppgtt_release(vm); - vm->closed = true; queue_rcu_work(vm->i915->wq, &vm->rcu); } @@ -546,6 +556,7 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass) { kref_init(&vm->ref); INIT_RCU_WORK(&vm->rcu, __i915_vm_release); + atomic_set(&vm->open, 1); /* * The vm->mutex must be reclaim safe (for use in the shrinker). @@ -562,7 +573,6 @@ static void i915_address_space_init(struct i915_address_space *vm, int subclass) stash_init(&vm->free_pages); - INIT_LIST_HEAD(&vm->unbound_list); INIT_LIST_HEAD(&vm->bound_list); } @@ -816,17 +826,6 @@ release_pd_entry(struct i915_page_directory * const pd, return free; } -/* - * PDE TLBs are a pain to invalidate on GEN8+. When we modify - * the page table structures, we mark them dirty so that - * context switching/execlist queuing code takes extra steps - * to ensure that tlbs are flushed. - */ -static void mark_tlbs_dirty(struct i915_ppgtt *ppgtt) -{ - ppgtt->pd_dirty_engines = ALL_ENGINES; -} - static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create) { struct drm_i915_private *dev_priv = ppgtt->vm.i915; @@ -1367,7 +1366,9 @@ static int gen8_init_scratch(struct i915_address_space *vm) if (vm->has_read_only && vm->i915->kernel_context && vm->i915->kernel_context->vm) { - struct i915_address_space *clone = vm->i915->kernel_context->vm; + struct i915_address_space *clone = + rcu_dereference_protected(vm->i915->kernel_context->vm, + true); /* static */ GEM_BUG_ON(!clone->has_read_only); @@ -1422,6 +1423,7 @@ static int gen8_preallocate_top_level_pdp(struct i915_ppgtt *ppgtt) set_pd_entry(pd, idx, pde); atomic_inc(px_used(pde)); /* keep pinned */ } + wmb(); return 0; } @@ -1489,8 +1491,10 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) * * Gen11 has HSDES#:1807136187 unresolved. Disable ro support * for now. + * + * Gen12 has inherited the same read-only fault issue from gen11. */ - ppgtt->vm.has_read_only = INTEL_GEN(i915) != 11; + ppgtt->vm.has_read_only = !IS_GEN_RANGE(i915, 11, 12); /* There are only few exceptions for gen >=6. chv and bxt. * And we are not sure about the latter so play safe for now. 
@@ -1509,13 +1513,12 @@ static struct i915_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915) } if (!i915_vm_is_4lvl(&ppgtt->vm)) { - if (intel_vgpu_active(i915)) { - err = gen8_preallocate_top_level_pdp(ppgtt); - if (err) - goto err_free_pd; - } + err = gen8_preallocate_top_level_pdp(ppgtt); + if (err) + goto err_free_pd; } + ppgtt->vm.bind_async_flags = I915_VMA_LOCAL_BIND; ppgtt->vm.insert_entries = gen8_ppgtt_insert; ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc; ppgtt->vm.clear_range = gen8_ppgtt_clear; @@ -1566,7 +1569,7 @@ static void gen7_ppgtt_enable(struct intel_gt *gt) } intel_uncore_write(uncore, GAM_ECOCHK, ecochk); - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { /* GFX_MODE is per-ring on gen7+ */ ENGINE_WRITE(engine, RING_MODE_GEN7, @@ -1729,10 +1732,8 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, } spin_unlock(&pd->lock); - if (flush) { - mark_tlbs_dirty(&ppgtt->base); + if (flush) gen6_ggtt_invalidate(vm->gt->ggtt); - } goto out; @@ -1786,15 +1787,13 @@ static void gen6_ppgtt_free_pd(struct gen6_ppgtt *ppgtt) static void gen6_ppgtt_cleanup(struct i915_address_space *vm) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); - struct drm_i915_private *i915 = vm->i915; - /* FIXME remove the struct_mutex to bring the locking under control */ - mutex_lock(&i915->drm.struct_mutex); i915_vma_destroy(ppgtt->vma); - mutex_unlock(&i915->drm.struct_mutex); gen6_ppgtt_free_pd(ppgtt); free_scratch(vm); + + mutex_destroy(&ppgtt->pin_mutex); kfree(ppgtt->base.pd); } @@ -1827,7 +1826,6 @@ static int pd_vma_bind(struct i915_vma *vma, gen6_for_all_pdes(pt, ppgtt->base.pd, pde) gen6_write_pde(ppgtt, pde, pt); - mark_tlbs_dirty(&ppgtt->base); gen6_ggtt_invalidate(ggtt); return 0; @@ -1866,7 +1864,6 @@ static const struct i915_vma_ops pd_vma_ops = { static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) { - struct drm_i915_private *i915 = ppgtt->base.vm.i915; struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt; struct i915_vma *vma; @@ -1877,33 +1874,30 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) if (!vma) return ERR_PTR(-ENOMEM); - i915_active_init(i915, &vma->active, NULL, NULL); + i915_active_init(&vma->active, NULL, NULL); - vma->vm = &ggtt->vm; + mutex_init(&vma->pages_mutex); + vma->vm = i915_vm_get(&ggtt->vm); vma->ops = &pd_vma_ops; vma->private = ppgtt; vma->size = size; vma->fence_size = size; - vma->flags = I915_VMA_GGTT; + atomic_set(&vma->flags, I915_VMA_GGTT); vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */ INIT_LIST_HEAD(&vma->obj_link); INIT_LIST_HEAD(&vma->closed_link); - mutex_lock(&vma->vm->mutex); - list_add(&vma->vm_link, &vma->vm->unbound_list); - mutex_unlock(&vma->vm->mutex); - return vma; } int gen6_ppgtt_pin(struct i915_ppgtt *base) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); - int err; + int err = 0; - GEM_BUG_ON(ppgtt->base.vm.closed); + GEM_BUG_ON(!atomic_read(&ppgtt->base.vm.open)); /* * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt @@ -1911,24 +1905,26 @@ int gen6_ppgtt_pin(struct i915_ppgtt *base) * (When vma->pin_count becomes atomic, I expect we will naturally * need a larger, unpacked, type and kill this redundancy.) */ - if (ppgtt->pin_count++) + if (atomic_add_unless(&ppgtt->pin_count, 1, 0)) return 0; + if (mutex_lock_interruptible(&ppgtt->pin_mutex)) + return -EINTR; + /* * PPGTT PDEs reside in the GGTT and consists of 512 entries. 
The * allocator works in address space sizes, so it's multiplied by page * size. We allocate at the top of the GTT to avoid fragmentation. */ - err = i915_vma_pin(ppgtt->vma, - 0, GEN6_PD_ALIGN, - PIN_GLOBAL | PIN_HIGH); - if (err) - goto unpin; - - return 0; + if (!atomic_read(&ppgtt->pin_count)) { + err = i915_vma_pin(ppgtt->vma, + 0, GEN6_PD_ALIGN, + PIN_GLOBAL | PIN_HIGH); + } + if (!err) + atomic_inc(&ppgtt->pin_count); + mutex_unlock(&ppgtt->pin_mutex); -unpin: - ppgtt->pin_count = 0; return err; } @@ -1936,22 +1932,20 @@ void gen6_ppgtt_unpin(struct i915_ppgtt *base) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); - GEM_BUG_ON(!ppgtt->pin_count); - if (--ppgtt->pin_count) - return; - - i915_vma_unpin(ppgtt->vma); + GEM_BUG_ON(!atomic_read(&ppgtt->pin_count)); + if (atomic_dec_and_test(&ppgtt->pin_count)) + i915_vma_unpin(ppgtt->vma); } void gen6_ppgtt_unpin_all(struct i915_ppgtt *base) { struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(base); - if (!ppgtt->pin_count) + if (!atomic_read(&ppgtt->pin_count)) return; - ppgtt->pin_count = 0; i915_vma_unpin(ppgtt->vma); + atomic_set(&ppgtt->pin_count, 0); } static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) @@ -1964,9 +1958,12 @@ static struct i915_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915) if (!ppgtt) return ERR_PTR(-ENOMEM); + mutex_init(&ppgtt->pin_mutex); + ppgtt_init(&ppgtt->base, &i915->gt); ppgtt->base.vm.top = 1; + ppgtt->base.vm.bind_async_flags = I915_VMA_LOCAL_BIND; ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range; ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range; ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries; @@ -2023,7 +2020,7 @@ static void gtt_write_workarounds(struct intel_gt *gt) intel_uncore_write(uncore, GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); - else if (INTEL_GEN(i915) >= 9) + else if (INTEL_GEN(i915) >= 9 && INTEL_GEN(i915) <= 11) intel_uncore_write(uncore, GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); @@ -2202,7 +2199,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm, gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE; - for_each_sgt_dma(addr, sgt_iter, vma->pages) + for_each_sgt_daddr(addr, sgt_iter, vma->pages) gen8_set_pte(gtt_entries++, pte_encode | addr); /* @@ -2243,7 +2240,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm, unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE; struct sgt_iter iter; dma_addr_t addr; - for_each_sgt_dma(addr, iter, vma->pages) + for_each_sgt_daddr(addr, iter, vma->pages) iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); /* @@ -2448,7 +2445,7 @@ static int ggtt_bind_vma(struct i915_vma *vma, * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally * upgrade to both bound if we bind either to avoid double-binding. 
*/ - vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; + atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); return 0; } @@ -2478,14 +2475,18 @@ static int aliasing_gtt_bind_vma(struct i915_vma *vma, if (flags & I915_VMA_LOCAL_BIND) { struct i915_ppgtt *alias = i915_vm_to_ggtt(vma->vm)->alias; - if (!(vma->flags & I915_VMA_LOCAL_BIND)) { + if (flags & I915_VMA_ALLOC) { ret = alias->vm.allocate_va_range(&alias->vm, vma->node.start, vma->size); if (ret) return ret; + + set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)); } + GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, + __i915_vma_flags(vma))); alias->vm.insert_entries(&alias->vm, vma, cache_level, pte_flags); } @@ -2506,7 +2507,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) { struct drm_i915_private *i915 = vma->vm->i915; - if (vma->flags & I915_VMA_GLOBAL_BIND) { + if (i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) { struct i915_address_space *vm = vma->vm; intel_wakeref_t wakeref; @@ -2514,7 +2515,7 @@ static void aliasing_gtt_unbind_vma(struct i915_vma *vma) vm->clear_range(vm, vma->node.start, vma->size); } - if (vma->flags & I915_VMA_LOCAL_BIND) { + if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) { struct i915_address_space *vm = &i915_vm_to_ggtt(vma->vm)->alias->vm; @@ -2530,7 +2531,9 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, struct i915_ggtt *ggtt = &dev_priv->ggtt; if (unlikely(ggtt->do_idle_maps)) { - if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) { + /* XXX This does not prevent more requests being submitted! */ + if (intel_gt_retire_requests_timeout(ggtt->vm.gt, + -MAX_SCHEDULE_TIMEOUT)) { DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); /* Wait a bit, in hopes it avoids the hang */ udelay(10); @@ -2555,12 +2558,12 @@ static int ggtt_set_pages(struct i915_vma *vma) return 0; } -static void i915_gtt_color_adjust(const struct drm_mm_node *node, - unsigned long color, - u64 *start, - u64 *end) +static void i915_ggtt_color_adjust(const struct drm_mm_node *node, + unsigned long color, + u64 *start, + u64 *end) { - if (node->allocated && node->color != color) + if (i915_node_color_differs(node, color)) *start += I915_GTT_PAGE_SIZE; /* Also leave a space between the unallocated reserved node after the @@ -2598,6 +2601,7 @@ static int init_aliasing_ppgtt(struct i915_ggtt *ggtt) goto err_ppgtt; ggtt->alias = ppgtt; + ggtt->vm.bind_async_flags |= ppgtt->vm.bind_async_flags; GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma); ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma; @@ -2614,22 +2618,16 @@ err_ppgtt: static void fini_aliasing_ppgtt(struct i915_ggtt *ggtt) { - struct drm_i915_private *i915 = ggtt->vm.i915; struct i915_ppgtt *ppgtt; - mutex_lock(&i915->drm.struct_mutex); - ppgtt = fetch_and_zero(&ggtt->alias); if (!ppgtt) - goto out; + return; i915_vm_put(&ppgtt->vm); ggtt->vm.vma_ops.bind_vma = ggtt_bind_vma; ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma; - -out: - mutex_unlock(&i915->drm.struct_mutex); } static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt) @@ -2661,7 +2659,8 @@ static void ggtt_release_guc_top(struct i915_ggtt *ggtt) static void cleanup_init_ggtt(struct i915_ggtt *ggtt) { ggtt_release_guc_top(ggtt); - drm_mm_remove_node(&ggtt->error_capture); + if (drm_mm_node_allocated(&ggtt->error_capture)) + drm_mm_remove_node(&ggtt->error_capture); } static int init_ggtt(struct i915_ggtt *ggtt) @@ -2692,13 +2691,15 @@ static int init_ggtt(struct i915_ggtt *ggtt) if (ret) return ret; - /* Reserve a mappable slot for our 
lockless error capture */ - ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture, - PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, - 0, ggtt->mappable_end, - DRM_MM_INSERT_LOW); - if (ret) - return ret; + if (ggtt->mappable_end) { + /* Reserve a mappable slot for our lockless error capture */ + ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture, + PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, + 0, ggtt->mappable_end, + DRM_MM_INSERT_LOW); + if (ret) + return ret; + } /* * The upper portion of the GuC address space has a sizeable hole @@ -2746,35 +2747,33 @@ int i915_init_ggtt(struct drm_i915_private *i915) static void ggtt_cleanup_hw(struct i915_ggtt *ggtt) { - struct drm_i915_private *i915 = ggtt->vm.i915; struct i915_vma *vma, *vn; - ggtt->vm.closed = true; + atomic_set(&ggtt->vm.open, 0); rcu_barrier(); /* flush the RCU'ed__i915_vm_release */ - flush_workqueue(i915->wq); + flush_workqueue(ggtt->vm.i915->wq); - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(&ggtt->vm.mutex); list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) - WARN_ON(i915_vma_unbind(vma)); + WARN_ON(__i915_vma_unbind(vma)); if (drm_mm_node_allocated(&ggtt->error_capture)) drm_mm_remove_node(&ggtt->error_capture); ggtt_release_guc_top(ggtt); - - if (drm_mm_initialized(&ggtt->vm.mm)) { - intel_vgt_deballoon(ggtt); - i915_address_space_fini(&ggtt->vm); - } + intel_vgt_deballoon(ggtt); ggtt->vm.cleanup(&ggtt->vm); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(&ggtt->vm.mutex); + i915_address_space_fini(&ggtt->vm); arch_phys_wc_del(ggtt->mtrr); - io_mapping_fini(&ggtt->iomap); + + if (ggtt->iomap.size) + io_mapping_fini(&ggtt->iomap); } /** @@ -2794,8 +2793,6 @@ void i915_ggtt_driver_release(struct drm_i915_private *i915) set_pages_array_wb(pvec->pages, pvec->nr); __pagevec_release(pvec); } - - i915_gem_cleanup_stolen(i915); } static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) @@ -2873,35 +2870,51 @@ static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) return 0; } -static void tgl_setup_private_ppat(struct drm_i915_private *dev_priv) +static void tgl_setup_private_ppat(struct intel_uncore *uncore) { /* TGL doesn't support LLC or AGE settings */ - I915_WRITE(GEN12_PAT_INDEX(0), GEN8_PPAT_WB); - I915_WRITE(GEN12_PAT_INDEX(1), GEN8_PPAT_WC); - I915_WRITE(GEN12_PAT_INDEX(2), GEN8_PPAT_WT); - I915_WRITE(GEN12_PAT_INDEX(3), GEN8_PPAT_UC); - I915_WRITE(GEN12_PAT_INDEX(4), GEN8_PPAT_WB); - I915_WRITE(GEN12_PAT_INDEX(5), GEN8_PPAT_WB); - I915_WRITE(GEN12_PAT_INDEX(6), GEN8_PPAT_WB); - I915_WRITE(GEN12_PAT_INDEX(7), GEN8_PPAT_WB); -} - -static void cnl_setup_private_ppat(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN10_PAT_INDEX(0), GEN8_PPAT_WB | GEN8_PPAT_LLC); - I915_WRITE(GEN10_PAT_INDEX(1), GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); - I915_WRITE(GEN10_PAT_INDEX(2), GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); - I915_WRITE(GEN10_PAT_INDEX(3), GEN8_PPAT_UC); - I915_WRITE(GEN10_PAT_INDEX(4), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); - I915_WRITE(GEN10_PAT_INDEX(5), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); - I915_WRITE(GEN10_PAT_INDEX(6), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); - I915_WRITE(GEN10_PAT_INDEX(7), GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); + intel_uncore_write(uncore, GEN12_PAT_INDEX(0), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(1), GEN8_PPAT_WC); + intel_uncore_write(uncore, GEN12_PAT_INDEX(2), GEN8_PPAT_WT); + intel_uncore_write(uncore, GEN12_PAT_INDEX(3), GEN8_PPAT_UC); + 
intel_uncore_write(uncore, GEN12_PAT_INDEX(4), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(5), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(6), GEN8_PPAT_WB); + intel_uncore_write(uncore, GEN12_PAT_INDEX(7), GEN8_PPAT_WB); +} + +static void cnl_setup_private_ppat(struct intel_uncore *uncore) +{ + intel_uncore_write(uncore, + GEN10_PAT_INDEX(0), + GEN8_PPAT_WB | GEN8_PPAT_LLC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(1), + GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(2), + GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(3), + GEN8_PPAT_UC); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(4), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(5), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(6), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)); + intel_uncore_write(uncore, + GEN10_PAT_INDEX(7), + GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); } /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability * bits. When using advanced contexts each context stores its own PAT, but * writing this data shouldn't be harmful even in those cases. */ -static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) +static void bdw_setup_private_ppat(struct intel_uncore *uncore) { u64 pat; @@ -2914,11 +2927,11 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); - I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); - I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); } -static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) +static void chv_setup_private_ppat(struct intel_uncore *uncore) { u64 pat; @@ -2950,8 +2963,8 @@ static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) GEN8_PPAT(6, CHV_PPAT_SNOOP) | GEN8_PPAT(7, CHV_PPAT_SNOOP); - I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); - I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_LO, lower_32_bits(pat)); + intel_uncore_write(uncore, GEN8_PRIVATE_PAT_HI, upper_32_bits(pat)); } static void gen6_gmch_remove(struct i915_address_space *vm) @@ -2962,18 +2975,26 @@ static void gen6_gmch_remove(struct i915_address_space *vm) cleanup_scratch_page(vm); } -static void setup_private_pat(struct drm_i915_private *dev_priv) +static void setup_private_pat(struct intel_uncore *uncore) { - GEM_BUG_ON(INTEL_GEN(dev_priv) < 8); + struct drm_i915_private *i915 = uncore->i915; - if (INTEL_GEN(dev_priv) >= 12) - tgl_setup_private_ppat(dev_priv); - else if (INTEL_GEN(dev_priv) >= 10) - cnl_setup_private_ppat(dev_priv); - else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv)) - chv_setup_private_ppat(dev_priv); + GEM_BUG_ON(INTEL_GEN(i915) < 8); + + if (INTEL_GEN(i915) >= 12) + tgl_setup_private_ppat(uncore); + else if (INTEL_GEN(i915) >= 10) + cnl_setup_private_ppat(uncore); + else if (IS_CHERRYVIEW(i915) || IS_GEN9_LP(i915)) + chv_setup_private_ppat(uncore); else - bdw_setup_private_ppat(dev_priv); + bdw_setup_private_ppat(uncore); +} + +static struct resource pci_resource(struct pci_dev *pdev, int bar) +{ + return (struct 
resource)DEFINE_RES_MEM(pci_resource_start(pdev, bar), + pci_resource_len(pdev, bar)); } static int gen8_gmch_probe(struct i915_ggtt *ggtt) @@ -2985,10 +3006,10 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) int err; /* TODO: We're not aware of mappable constraints on gen8 yet */ - ggtt->gmadr = - (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2), - pci_resource_len(pdev, 2)); - ggtt->mappable_end = resource_size(&ggtt->gmadr); + if (!IS_DGFX(dev_priv)) { + ggtt->gmadr = pci_resource(pdev, 2); + ggtt->mappable_end = resource_size(&ggtt->gmadr); + } err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); if (!err) @@ -3029,7 +3050,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt) ggtt->vm.pte_encode = gen8_pte_encode; - setup_private_pat(dev_priv); + setup_private_pat(ggtt->vm.gt->uncore); return ggtt_probe_common(ggtt, size); } @@ -3200,9 +3221,6 @@ int i915_ggtt_probe_hw(struct drm_i915_private *i915) static int ggtt_init_hw(struct i915_ggtt *ggtt) { struct drm_i915_private *i915 = ggtt->vm.i915; - int ret = 0; - - mutex_lock(&i915->drm.struct_mutex); i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); @@ -3212,24 +3230,23 @@ static int ggtt_init_hw(struct i915_ggtt *ggtt) ggtt->vm.has_read_only = IS_VALLEYVIEW(i915); if (!HAS_LLC(i915) && !HAS_PPGTT(i915)) - ggtt->vm.mm.color_adjust = i915_gtt_color_adjust; - - if (!io_mapping_init_wc(&ggtt->iomap, - ggtt->gmadr.start, - ggtt->mappable_end)) { - ggtt->vm.cleanup(&ggtt->vm); - ret = -EIO; - goto out; - } + ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust; + + if (ggtt->mappable_end) { + if (!io_mapping_init_wc(&ggtt->iomap, + ggtt->gmadr.start, + ggtt->mappable_end)) { + ggtt->vm.cleanup(&ggtt->vm); + return -EIO; + } - ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end); + ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, + ggtt->mappable_end); + } i915_ggtt_init_fences(ggtt); -out: - mutex_unlock(&i915->drm.struct_mutex); - - return ret; + return 0; } /** @@ -3251,19 +3268,7 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv) if (ret) return ret; - /* - * Initialise stolen early so that we may reserve preallocated - * objects for the BIOS to KMS transition. - */ - ret = i915_gem_init_stolen(dev_priv); - if (ret) - goto out_gtt_cleanup; - return 0; - -out_gtt_cleanup: - dev_priv->ggtt.vm.cleanup(&dev_priv->ggtt.vm); - return ret; } int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv) @@ -3301,6 +3306,7 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt) { struct i915_vma *vma, *vn; bool flush = false; + int open; intel_gt_check_and_clear_faults(ggtt->vm.gt); @@ -3308,33 +3314,31 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt) /* First fill our portion of the GTT with scratch pages */ ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total); - ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */ + + /* Skip rewriting PTE on VMA unbind. */ + open = atomic_xchg(&ggtt->vm.open, 0); /* clflush objects bound into the GGTT and rebind them. */ list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) { struct drm_i915_gem_object *obj = vma->obj; - if (!(vma->flags & I915_VMA_GLOBAL_BIND)) + if (!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)) continue; - mutex_unlock(&ggtt->vm.mutex); - - if (!i915_vma_unbind(vma)) - goto lock; + if (!__i915_vma_unbind(vma)) + continue; + clear_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma)); WARN_ON(i915_vma_bind(vma, obj ? 
obj->cache_level : 0, - PIN_UPDATE)); + PIN_GLOBAL, NULL)); if (obj) { /* only used during resume => exclusive access */ flush |= fetch_and_zero(&obj->write_domain); obj->read_domains |= I915_GEM_DOMAIN_GTT; } - -lock: - mutex_lock(&ggtt->vm.mutex); } - ggtt->vm.closed = false; + atomic_set(&ggtt->vm.open, open); ggtt->invalidate(ggtt); mutex_unlock(&ggtt->vm.mutex); @@ -3345,10 +3349,12 @@ lock: void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915) { - ggtt_restore_mappings(&i915->ggtt); + struct i915_ggtt *ggtt = &i915->ggtt; + + ggtt_restore_mappings(ggtt); if (INTEL_GEN(i915) >= 8) - setup_private_pat(i915); + setup_private_pat(ggtt->vm.gt->uncore); } static struct scatterlist * @@ -3726,7 +3732,8 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, u64 offset; int err; - lockdep_assert_held(&vm->i915->drm.struct_mutex); + lockdep_assert_held(&vm->mutex); + GEM_BUG_ON(!size); GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)); GEM_BUG_ON(alignment && !is_power_of_2(alignment)); diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h index b97a47fc7a68..402283ce2864 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.h +++ b/drivers/gpu/drm/i915/i915_gem_gtt.h @@ -148,8 +148,8 @@ typedef u64 gen8_pte_t; #define GEN8_PDE_IPS_64K BIT(11) #define GEN8_PDE_PS_2M BIT(7) -#define for_each_sgt_dma(__dmap, __iter, __sgt) \ - __for_each_sgt_dma(__dmap, __iter, __sgt, I915_GTT_PAGE_SIZE) +#define for_each_sgt_daddr(__dp, __iter, __sgt) \ + __for_each_sgt_daddr(__dp, __iter, __sgt, I915_GTT_PAGE_SIZE) struct intel_remapped_plane_info { /* in gtt pages */ @@ -305,7 +305,16 @@ struct i915_address_space { u64 total; /* size addr space maps (ex. 2GB for ggtt) */ u64 reserved; /* size addr space reserved */ - bool closed; + unsigned int bind_async_flags; + + /* + * Each active user context has its own address space (in full-ppgtt). + * Since the vm may be shared between multiple contexts, we count how + * many contexts keep us "open". Once open hits zero, we are closed + * and do not allow any new attachments, and proceed to shutdown our + * vma and page directories. + */ + atomic_t open; struct mutex mutex; /* protects vma and our lists */ #define VM_CLASS_GGTT 0 @@ -320,11 +329,6 @@ struct i915_address_space { */ struct list_head bound_list; - /** - * List of vma that are not unbound. - */ - struct list_head unbound_list; - struct pagestash free_pages; /* Global GTT */ @@ -376,6 +380,12 @@ i915_vm_has_scratch_64K(struct i915_address_space *vm) return vm->scratch_order == get_order(I915_GTT_PAGE_SIZE_64K); } +static inline bool +i915_vm_has_cache_coloring(struct i915_address_space *vm) +{ + return i915_is_ggtt(vm) && vm->mm.color_adjust; +} + /* The Graphics Translation Table is the way in which GEN hardware translates a * Graphics Virtual Address into a Physical Address. 
In addition to the normal * collateral associated with any va->pa translations GEN hardware also has a @@ -401,6 +411,11 @@ struct i915_ggtt { int mtrr; + /** Bit 6 swizzling required for X tiling */ + u32 bit_6_swizzle_x; + /** Bit 6 swizzling required for Y tiling */ + u32 bit_6_swizzle_y; + u32 pin_bias; unsigned int num_fences; @@ -422,7 +437,6 @@ struct i915_ggtt { struct i915_ppgtt { struct i915_address_space vm; - intel_engine_mask_t pd_dirty_engines; struct i915_page_directory *pd; }; @@ -432,7 +446,9 @@ struct gen6_ppgtt { struct i915_vma *vma; gen6_pte_t __iomem *pd_addr; - unsigned int pin_count; + atomic_t pin_count; + struct mutex pin_mutex; + bool scan_for_unused_pt; }; @@ -559,6 +575,11 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt); int i915_init_ggtt(struct drm_i915_private *dev_priv); void i915_ggtt_driver_release(struct drm_i915_private *dev_priv); +static inline bool i915_ggtt_has_aperture(const struct i915_ggtt *ggtt) +{ + return ggtt->mappable_end > 0; +} + int i915_ppgtt_init_hw(struct intel_gt *gt); struct i915_ppgtt *i915_ppgtt_create(struct drm_i915_private *dev_priv); @@ -577,6 +598,35 @@ static inline void i915_vm_put(struct i915_address_space *vm) kref_put(&vm->ref, i915_vm_release); } +static inline struct i915_address_space * +i915_vm_open(struct i915_address_space *vm) +{ + GEM_BUG_ON(!atomic_read(&vm->open)); + atomic_inc(&vm->open); + return i915_vm_get(vm); +} + +static inline bool +i915_vm_tryopen(struct i915_address_space *vm) +{ + if (atomic_add_unless(&vm->open, 1, 0)) + return i915_vm_get(vm); + + return false; +} + +void __i915_vm_close(struct i915_address_space *vm); + +static inline void +i915_vm_close(struct i915_address_space *vm) +{ + GEM_BUG_ON(!atomic_read(&vm->open)); + if (atomic_dec_and_test(&vm->open)) + __i915_vm_close(vm); + + i915_vm_put(vm); +} + int gen6_ppgtt_pin(struct i915_ppgtt *base); void gen6_ppgtt_unpin(struct i915_ppgtt *base); void gen6_ppgtt_unpin_all(struct i915_ppgtt *base); @@ -609,10 +659,9 @@ int i915_gem_gtt_insert(struct i915_address_space *vm, #define PIN_OFFSET_BIAS BIT_ULL(6) #define PIN_OFFSET_FIXED BIT_ULL(7) -#define PIN_MBZ BIT_ULL(8) /* I915_VMA_PIN_OVERFLOW */ -#define PIN_GLOBAL BIT_ULL(9) /* I915_VMA_GLOBAL_BIND */ -#define PIN_USER BIT_ULL(10) /* I915_VMA_LOCAL_BIND */ -#define PIN_UPDATE BIT_ULL(11) +#define PIN_UPDATE BIT_ULL(9) +#define PIN_GLOBAL BIT_ULL(10) /* I915_VMA_GLOBAL_BIND */ +#define PIN_USER BIT_ULL(11) /* I915_VMA_LOCAL_BIND */ #define PIN_OFFSET_MASK (-I915_GTT_PAGE_SIZE) diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 9f1517af5b7f..cf8a8c3ef047 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -5,6 +5,7 @@ #include "gt/intel_engine_user.h" #include "i915_drv.h" +#include "i915_perf.h" int i915_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) @@ -79,8 +80,8 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, break; case I915_PARAM_HAS_GPU_RESET: value = i915_modparams.enable_hangcheck && - intel_has_gpu_reset(i915); - if (value && intel_has_reset_engine(i915)) + intel_has_gpu_reset(&i915->gt); + if (value && intel_has_reset_engine(&i915->gt)) value = 2; break; case I915_PARAM_HAS_RESOURCE_STREAMER: @@ -156,6 +157,9 @@ int i915_getparam_ioctl(struct drm_device *dev, void *data, case I915_PARAM_MMAP_GTT_COHERENT: value = INTEL_INFO(i915)->has_coherent_ggtt; break; + case I915_PARAM_PERF_REVISION: + value = i915_perf_ioctl_version(); + break; default: 
DRM_DEBUG("Unknown parameter %d\n", param->param); return -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index e284bd76fa86..3c85cb0ee99f 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -40,6 +40,7 @@ #include "display/intel_overlay.h" #include "gem/i915_gem_context.h" +#include "gem/i915_gem_lmem.h" #include "i915_drv.h" #include "i915_gpu_error.h" @@ -235,6 +236,7 @@ struct compress { struct pagevec pool; struct z_stream_s zstream; void *tmp; + bool wc; }; static bool compress_init(struct compress *c) @@ -292,7 +294,7 @@ static int compress_page(struct compress *c, struct z_stream_s *zstream = &c->zstream; zstream->next_in = src; - if (c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) + if (c->wc && c->tmp && i915_memcpy_from_wc(c->tmp, src, PAGE_SIZE)) zstream->next_in = c->tmp; zstream->avail_in = PAGE_SIZE; @@ -367,6 +369,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) struct compress { struct pagevec pool; + bool wc; }; static bool compress_init(struct compress *c) @@ -389,7 +392,7 @@ static int compress_page(struct compress *c, if (!ptr) return -ENOMEM; - if (!i915_memcpy_from_wc(ptr, src, PAGE_SIZE)) + if (!(c->wc && i915_memcpy_from_wc(ptr, src, PAGE_SIZE))) memcpy(ptr, src, PAGE_SIZE); dst->pages[dst->page_count++] = ptr; @@ -421,6 +424,7 @@ static void err_compression_marker(struct drm_i915_error_state_buf *m) static void error_print_instdone(struct drm_i915_error_state_buf *m, const struct drm_i915_error_engine *ee) { + const struct sseu_dev_info *sseu = &RUNTIME_INFO(m->i915)->sseu; int slice; int subslice; @@ -436,12 +440,12 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m, if (INTEL_GEN(m->i915) <= 6) return; - for_each_instdone_slice_subslice(m->i915, slice, subslice) + for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) err_printf(m, " SAMPLER_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, ee->instdone.sampler[slice][subslice]); - for_each_instdone_slice_subslice(m->i915, slice, subslice) + for_each_instdone_slice_subslice(m->i915, sseu, slice, subslice) err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n", slice, subslice, ee->instdone.row[slice][subslice]); @@ -470,9 +474,9 @@ static void error_print_context(struct drm_i915_error_state_buf *m, const char *header, const struct drm_i915_error_context *ctx) { - err_printf(m, "%s%s[%d] hw_id %d, prio %d, guilty %d active %d\n", - header, ctx->comm, ctx->pid, ctx->hw_id, - ctx->sched_attr.priority, ctx->guilty, ctx->active); + err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n", + header, ctx->comm, ctx->pid, ctx->sched_attr.priority, + ctx->guilty, ctx->active); } static void error_print_engine(struct drm_i915_error_state_buf *m, @@ -533,10 +537,6 @@ static void error_print_engine(struct drm_i915_error_state_buf *m, } err_printf(m, " ring->head: 0x%08x\n", ee->cpu_ring_head); err_printf(m, " ring->tail: 0x%08x\n", ee->cpu_ring_tail); - err_printf(m, " hangcheck timestamp: %dms (%lu%s)\n", - jiffies_to_msecs(ee->hangcheck_timestamp - epoch), - ee->hangcheck_timestamp, - ee->hangcheck_timestamp == epoch ? 
"; epoch" : ""); err_printf(m, " engine reset count: %u\n", ee->reset_count); for (n = 0; n < ee->num_ports; n++) { @@ -574,6 +574,9 @@ static void print_error_obj(struct drm_i915_error_state_buf *m, lower_32_bits(obj->gtt_offset)); } + if (obj->gtt_page_sizes > I915_GTT_PAGE_SIZE_4K) + err_printf(m, "gtt_page_sizes = 0x%08x\n", obj->gtt_page_sizes); + err_compression_marker(m); for (page = 0; page < obj->page_count; page++) { int i, len; @@ -675,11 +678,8 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, ts = ktime_to_timespec64(error->uptime); err_printf(m, "Uptime: %lld s %ld us\n", (s64)ts.tv_sec, ts.tv_nsec / NSEC_PER_USEC); - err_printf(m, "Epoch: %lu jiffies (%u HZ)\n", error->epoch, HZ); - err_printf(m, "Capture: %lu jiffies; %d ms ago, %d ms after epoch\n", - error->capture, - jiffies_to_msecs(jiffies - error->capture), - jiffies_to_msecs(error->capture - error->epoch)); + err_printf(m, "Capture: %lu jiffies; %d ms ago\n", + error->capture, jiffies_to_msecs(jiffies - error->capture)); for (ee = error->engine; ee; ee = ee->next) err_printf(m, "Active process (on ring %s): %s [%d]\n", @@ -734,8 +734,24 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, if (IS_GEN(m->i915, 7)) err_printf(m, "ERR_INT: 0x%08x\n", error->err_int); + if (IS_GEN_RANGE(m->i915, 8, 11)) + err_printf(m, "GTT_CACHE_EN: 0x%08x\n", error->gtt_cache); + + if (IS_GEN(m->i915, 12)) + err_printf(m, "AUX_ERR_DBG: 0x%08x\n", error->aux_err); + + if (INTEL_GEN(m->i915) >= 12) { + int i; + + for (i = 0; i < GEN12_SFC_DONE_MAX; i++) + err_printf(m, " SFC_DONE[%d]: 0x%08x\n", i, + error->sfc_done[i]); + + err_printf(m, " GAM_DONE: 0x%08x\n", error->gam_done); + } + for (ee = error->engine; ee; ee = ee->next) - error_print_engine(m, ee, error->epoch); + error_print_engine(m, ee, error->capture); for (ee = error->engine; ee; ee = ee->next) { const struct drm_i915_error_object *obj; @@ -763,7 +779,7 @@ static void __err_print_to_sgl(struct drm_i915_error_state_buf *m, for (j = 0; j < ee->num_requests; j++) error_print_request(m, " ", &ee->requests[j], - error->epoch); + error->capture); } print_error_obj(m, ee->engine, "ringbuffer", ee->ringbuffer); @@ -963,7 +979,6 @@ i915_error_object_create(struct drm_i915_private *i915, struct drm_i915_error_object *dst; unsigned long num_pages; struct sgt_iter iter; - dma_addr_t dma; int ret; might_sleep(); @@ -984,21 +999,59 @@ i915_error_object_create(struct drm_i915_private *i915, dst->gtt_offset = vma->node.start; dst->gtt_size = vma->node.size; + dst->gtt_page_sizes = vma->page_sizes.gtt; dst->num_pages = num_pages; dst->page_count = 0; dst->unused = 0; + compress->wc = i915_gem_object_is_lmem(vma->obj) || + drm_mm_node_allocated(&ggtt->error_capture); + ret = -EINVAL; - for_each_sgt_dma(dma, iter, vma->pages) { + if (drm_mm_node_allocated(&ggtt->error_capture)) { void __iomem *s; + dma_addr_t dma; - ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0); + for_each_sgt_daddr(dma, iter, vma->pages) { + ggtt->vm.insert_page(&ggtt->vm, dma, slot, + I915_CACHE_NONE, 0); - s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE); - ret = compress_page(compress, (void __force *)s, dst); - io_mapping_unmap(s); - if (ret) - break; + s = io_mapping_map_wc(&ggtt->iomap, slot, PAGE_SIZE); + ret = compress_page(compress, (void __force *)s, dst); + io_mapping_unmap(s); + if (ret) + break; + } + } else if (i915_gem_object_is_lmem(vma->obj)) { + struct intel_memory_region *mem = vma->obj->mm.region; + dma_addr_t dma; + + for_each_sgt_daddr(dma, iter, 
vma->pages) { + void __iomem *s; + + s = io_mapping_map_wc(&mem->iomap, dma, PAGE_SIZE); + ret = compress_page(compress, (void __force *)s, dst); + io_mapping_unmap(s); + if (ret) + break; + } + } else { + struct page *page; + + for_each_sgt_page(page, iter, vma->pages) { + void *s; + + drm_clflush_pages(&page, 1); + + s = kmap(page); + ret = compress_page(compress, s, dst); + kunmap(s); + + drm_clflush_pages(&page, 1); + + if (ret) + break; + } } if (ret || compress_flush(compress, dst)) { @@ -1136,8 +1189,6 @@ static void error_record_engine_registers(struct i915_gpu_state *error, } ee->idle = intel_engine_is_idle(engine); - if (!ee->idle) - ee->hangcheck_timestamp = engine->hangcheck.action_timestamp; ee->reset_count = i915_reset_engine_count(&dev_priv->gpu_error, engine); @@ -1263,7 +1314,6 @@ static bool record_context(struct drm_i915_error_context *e, rcu_read_unlock(); } - e->hw_id = ctx->hw_id; e->sched_attr = ctx->sched; e->guilty = atomic_read(&ctx->guilty_count); e->active = atomic_read(&ctx->active_count); @@ -1291,7 +1341,7 @@ capture_vma(struct capture_vma *next, if (!c) return next; - if (!i915_active_trygrab(&vma->active)) { + if (!i915_active_acquire_if_busy(&vma->active)) { kfree(c); return next; } @@ -1431,7 +1481,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress) *this->slot = i915_error_object_create(i915, vma, compress); - i915_active_ungrab(&vma->active); + i915_active_release(&vma->active); i915_vma_put(vma); capture = this->next; @@ -1553,6 +1603,21 @@ static void capture_reg_state(struct i915_gpu_state *error) error->gac_eco = intel_uncore_read(uncore, GAC_ECO_BITS); } + if (IS_GEN_RANGE(i915, 8, 11)) + error->gtt_cache = intel_uncore_read(uncore, HSW_GTT_CACHE_EN); + + if (IS_GEN(i915, 12)) + error->aux_err = intel_uncore_read(uncore, GEN12_AUX_ERR_DBG); + + if (INTEL_GEN(i915) >= 12) { + for (i = 0; i < GEN12_SFC_DONE_MAX; i++) { + error->sfc_done[i] = + intel_uncore_read(uncore, GEN12_SFC_DONE(i)); + } + + error->gam_done = intel_uncore_read(uncore, GEN12_GAM_DONE); + } + /* 4: Everything else */ if (INTEL_GEN(i915) >= 11) { error->ier = intel_uncore_read(uncore, GEN8_DE_MISC_IER); @@ -1647,26 +1712,15 @@ static void capture_params(struct i915_gpu_state *error) i915_params_copy(&error->params, &i915_modparams); } -static unsigned long capture_find_epoch(const struct i915_gpu_state *error) -{ - const struct drm_i915_error_engine *ee; - unsigned long epoch = error->capture; - - for (ee = error->engine; ee; ee = ee->next) { - if (ee->hangcheck_timestamp && - time_before(ee->hangcheck_timestamp, epoch)) - epoch = ee->hangcheck_timestamp; - } - - return epoch; -} - static void capture_finish(struct i915_gpu_state *error) { struct i915_ggtt *ggtt = &error->i915->ggtt; - const u64 slot = ggtt->error_capture.start; - ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); + if (drm_mm_node_allocated(&ggtt->error_capture)) { + const u64 slot = ggtt->error_capture.start; + + ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE); + } } #define DAY_AS_SECONDS(x) (24 * 60 * 60 * (x)) @@ -1712,8 +1766,6 @@ i915_capture_gpu_state(struct drm_i915_private *i915) error->overlay = intel_overlay_capture_error_state(i915); error->display = intel_display_capture_error_state(i915); - error->epoch = capture_find_epoch(error); - capture_finish(error); compress_fini(&compress); diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h index df9f57766626..5d2c3372ff99 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.h +++ 
b/drivers/gpu/drm/i915/i915_gpu_error.h @@ -34,7 +34,6 @@ struct i915_gpu_state { ktime_t boottime; ktime_t uptime; unsigned long capture; - unsigned long epoch; struct drm_i915_private *i915; @@ -74,6 +73,10 @@ struct i915_gpu_state { u32 gam_ecochk; u32 gab_ctl; u32 gfx_mode; + u32 gtt_cache; + u32 aux_err; /* gen12 */ + u32 sfc_done[GEN12_SFC_DONE_MAX]; /* gen12 */ + u32 gam_done; /* gen12 */ u32 nfence; u64 fence[I915_MAX_NUM_FENCES]; @@ -85,7 +88,6 @@ struct i915_gpu_state { /* Software tracked state */ bool idle; - unsigned long hangcheck_timestamp; int num_requests; u32 reset_count; @@ -118,7 +120,6 @@ struct i915_gpu_state { struct drm_i915_error_context { char comm[TASK_COMM_LEN]; pid_t pid; - u32 hw_id; int active; int guilty; struct i915_sched_attr sched_attr; @@ -127,6 +128,7 @@ struct i915_gpu_state { struct drm_i915_error_object { u64 gtt_offset; u64 gtt_size; + u32 gtt_page_sizes; int num_pages; int page_count; int unused; diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 37e3dd3c1a9d..dae00f7dd7df 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -29,7 +29,6 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/circ_buf.h> -#include <linux/cpuidle.h> #include <linux/slab.h> #include <linux/sysrq.h> @@ -46,6 +45,7 @@ #include "gt/intel_gt.h" #include "gt/intel_gt_irq.h" #include "gt/intel_gt_pm_irq.h" +#include "gt/intel_rps.h" #include "i915_drv.h" #include "i915_irq.h" @@ -149,30 +149,24 @@ static const u32 hpd_gen12[HPD_NUM_PINS] = { }; static const u32 hpd_icp[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, - [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, - [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP, - [HPD_PORT_D] = SDE_TC2_HOTPLUG_ICP, - [HPD_PORT_E] = SDE_TC3_HOTPLUG_ICP, - [HPD_PORT_F] = SDE_TC4_HOTPLUG_ICP -}; - -static const u32 hpd_mcc[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, - [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, - [HPD_PORT_C] = SDE_TC1_HOTPLUG_ICP + [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), + [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), + [HPD_PORT_C] = SDE_TC_HOTPLUG_ICP(PORT_TC1), + [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC2), + [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC3), + [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC4), }; static const u32 hpd_tgp[HPD_NUM_PINS] = { - [HPD_PORT_A] = SDE_DDIA_HOTPLUG_ICP, - [HPD_PORT_B] = SDE_DDIB_HOTPLUG_ICP, - [HPD_PORT_C] = SDE_DDIC_HOTPLUG_TGP, - [HPD_PORT_D] = SDE_TC1_HOTPLUG_ICP, - [HPD_PORT_E] = SDE_TC2_HOTPLUG_ICP, - [HPD_PORT_F] = SDE_TC3_HOTPLUG_ICP, - [HPD_PORT_G] = SDE_TC4_HOTPLUG_ICP, - [HPD_PORT_H] = SDE_TC5_HOTPLUG_TGP, - [HPD_PORT_I] = SDE_TC6_HOTPLUG_TGP, + [HPD_PORT_A] = SDE_DDI_HOTPLUG_ICP(PORT_A), + [HPD_PORT_B] = SDE_DDI_HOTPLUG_ICP(PORT_B), + [HPD_PORT_C] = SDE_DDI_HOTPLUG_ICP(PORT_C), + [HPD_PORT_D] = SDE_TC_HOTPLUG_ICP(PORT_TC1), + [HPD_PORT_E] = SDE_TC_HOTPLUG_ICP(PORT_TC2), + [HPD_PORT_F] = SDE_TC_HOTPLUG_ICP(PORT_TC3), + [HPD_PORT_G] = SDE_TC_HOTPLUG_ICP(PORT_TC4), + [HPD_PORT_H] = SDE_TC_HOTPLUG_ICP(PORT_TC5), + [HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6), }; void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr, @@ -327,180 +321,6 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv, } } -static i915_reg_t gen6_pm_iir(struct drm_i915_private *dev_priv) -{ - WARN_ON_ONCE(INTEL_GEN(dev_priv) >= 11); - - return INTEL_GEN(dev_priv) >= 8 ? 
GEN8_GT_IIR(2) : GEN6_PMIIR; -} - -void gen11_reset_rps_interrupts(struct drm_i915_private *dev_priv) -{ - struct intel_gt *gt = &dev_priv->gt; - - spin_lock_irq(>->irq_lock); - - while (gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM)) - ; - - dev_priv->gt_pm.rps.pm_iir = 0; - - spin_unlock_irq(>->irq_lock); -} - -void gen6_reset_rps_interrupts(struct drm_i915_private *dev_priv) -{ - struct intel_gt *gt = &dev_priv->gt; - - spin_lock_irq(>->irq_lock); - gen6_gt_pm_reset_iir(gt, GEN6_PM_RPS_EVENTS); - dev_priv->gt_pm.rps.pm_iir = 0; - spin_unlock_irq(>->irq_lock); -} - -void gen6_enable_rps_interrupts(struct drm_i915_private *dev_priv) -{ - struct intel_gt *gt = &dev_priv->gt; - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - if (READ_ONCE(rps->interrupts_enabled)) - return; - - spin_lock_irq(>->irq_lock); - WARN_ON_ONCE(rps->pm_iir); - - if (INTEL_GEN(dev_priv) >= 11) - WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GTPM)); - else - WARN_ON_ONCE(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events); - - rps->interrupts_enabled = true; - gen6_gt_pm_enable_irq(gt, dev_priv->pm_rps_events); - - spin_unlock_irq(>->irq_lock); -} - -u32 gen6_sanitize_rps_pm_mask(const struct drm_i915_private *i915, u32 mask) -{ - return mask & ~i915->gt_pm.rps.pm_intrmsk_mbz; -} - -void gen6_disable_rps_interrupts(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - struct intel_gt *gt = &dev_priv->gt; - - if (!READ_ONCE(rps->interrupts_enabled)) - return; - - spin_lock_irq(>->irq_lock); - rps->interrupts_enabled = false; - - I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0u)); - - gen6_gt_pm_disable_irq(gt, GEN6_PM_RPS_EVENTS); - - spin_unlock_irq(>->irq_lock); - intel_synchronize_irq(dev_priv); - - /* Now that we will not be generating any more work, flush any - * outstanding tasks. As we are called on the RPS idle path, - * we will reset the GPU to minimum frequencies, so the current - * state of the worker can be discarded. 
- */ - cancel_work_sync(&rps->work); - if (INTEL_GEN(dev_priv) >= 11) - gen11_reset_rps_interrupts(dev_priv); - else - gen6_reset_rps_interrupts(dev_priv); -} - -void gen9_reset_guc_interrupts(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - assert_rpm_wakelock_held(>->i915->runtime_pm); - - spin_lock_irq(>->irq_lock); - gen6_gt_pm_reset_iir(gt, gt->pm_guc_events); - spin_unlock_irq(>->irq_lock); -} - -void gen9_enable_guc_interrupts(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - assert_rpm_wakelock_held(>->i915->runtime_pm); - - spin_lock_irq(>->irq_lock); - if (!guc->interrupts.enabled) { - WARN_ON_ONCE(intel_uncore_read(gt->uncore, - gen6_pm_iir(gt->i915)) & - gt->pm_guc_events); - guc->interrupts.enabled = true; - gen6_gt_pm_enable_irq(gt, gt->pm_guc_events); - } - spin_unlock_irq(>->irq_lock); -} - -void gen9_disable_guc_interrupts(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - assert_rpm_wakelock_held(>->i915->runtime_pm); - - spin_lock_irq(>->irq_lock); - guc->interrupts.enabled = false; - - gen6_gt_pm_disable_irq(gt, gt->pm_guc_events); - - spin_unlock_irq(>->irq_lock); - intel_synchronize_irq(gt->i915); - - gen9_reset_guc_interrupts(guc); -} - -void gen11_reset_guc_interrupts(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - spin_lock_irq(>->irq_lock); - gen11_gt_reset_one_iir(gt, 0, GEN11_GUC); - spin_unlock_irq(>->irq_lock); -} - -void gen11_enable_guc_interrupts(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - spin_lock_irq(>->irq_lock); - if (!guc->interrupts.enabled) { - u32 events = REG_FIELD_PREP(ENGINE1_MASK, GUC_INTR_GUC2HOST); - - WARN_ON_ONCE(gen11_gt_reset_one_iir(gt, 0, GEN11_GUC)); - intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, events); - intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~events); - guc->interrupts.enabled = true; - } - spin_unlock_irq(>->irq_lock); -} - -void gen11_disable_guc_interrupts(struct intel_guc *guc) -{ - struct intel_gt *gt = guc_to_gt(guc); - - spin_lock_irq(>->irq_lock); - guc->interrupts.enabled = false; - - intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_MASK, ~0); - intel_uncore_write(gt->uncore, GEN11_GUC_SG_INTR_ENABLE, 0); - - spin_unlock_irq(>->irq_lock); - intel_synchronize_irq(gt->i915); - - gen11_reset_guc_interrupts(guc); -} - /** * bdw_update_port_irq - update DE port interrupt * @dev_priv: driver private @@ -942,14 +762,14 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) return (position + crtc->scanline_offset) % vtotal; } -bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, +bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index, bool in_vblank_irq, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime, const struct drm_display_mode *mode) { struct drm_i915_private *dev_priv = to_i915(dev); - struct intel_crtc *intel_crtc = intel_get_crtc_for_pipe(dev_priv, - pipe); + struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index)); + enum pipe pipe = crtc->pipe; int position; int vbl_start, vbl_end, hsync_start, htotal, vtotal; unsigned long irqflags; @@ -992,7 +812,7 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, /* No obvious pixelcount register. Only query vertical * scanout position from Display scan line register. */ - position = __intel_get_crtc_scanline(intel_crtc); + position = __intel_get_crtc_scanline(crtc); } else { /* Have access to pixelcount since start of frame. 
* We can split this into vertical and horizontal @@ -1072,199 +892,6 @@ int intel_get_crtc_scanline(struct intel_crtc *crtc) return position; } -static void ironlake_rps_change_irq_handler(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u32 busy_up, busy_down, max_avg, min_avg; - u8 new_delay; - - spin_lock(&mchdev_lock); - - intel_uncore_write16(uncore, - MEMINTRSTS, - intel_uncore_read(uncore, MEMINTRSTS)); - - new_delay = dev_priv->ips.cur_delay; - - intel_uncore_write16(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); - busy_up = intel_uncore_read(uncore, RCPREVBSYTUPAVG); - busy_down = intel_uncore_read(uncore, RCPREVBSYTDNAVG); - max_avg = intel_uncore_read(uncore, RCBMAXAVG); - min_avg = intel_uncore_read(uncore, RCBMINAVG); - - /* Handle RCS change request from hw */ - if (busy_up > max_avg) { - if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) - new_delay = dev_priv->ips.cur_delay - 1; - if (new_delay < dev_priv->ips.max_delay) - new_delay = dev_priv->ips.max_delay; - } else if (busy_down < min_avg) { - if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) - new_delay = dev_priv->ips.cur_delay + 1; - if (new_delay > dev_priv->ips.min_delay) - new_delay = dev_priv->ips.min_delay; - } - - if (ironlake_set_drps(dev_priv, new_delay)) - dev_priv->ips.cur_delay = new_delay; - - spin_unlock(&mchdev_lock); - - return; -} - -static void vlv_c0_read(struct drm_i915_private *dev_priv, - struct intel_rps_ei *ei) -{ - ei->ktime = ktime_get_raw(); - ei->render_c0 = I915_READ(VLV_RENDER_C0_COUNT); - ei->media_c0 = I915_READ(VLV_MEDIA_C0_COUNT); -} - -void gen6_rps_reset_ei(struct drm_i915_private *dev_priv) -{ - memset(&dev_priv->gt_pm.rps.ei, 0, sizeof(dev_priv->gt_pm.rps.ei)); -} - -static u32 vlv_wa_c0_ei(struct drm_i915_private *dev_priv, u32 pm_iir) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - const struct intel_rps_ei *prev = &rps->ei; - struct intel_rps_ei now; - u32 events = 0; - - if ((pm_iir & GEN6_PM_RP_UP_EI_EXPIRED) == 0) - return 0; - - vlv_c0_read(dev_priv, &now); - - if (prev->ktime) { - u64 time, c0; - u32 render, media; - - time = ktime_us_delta(now.ktime, prev->ktime); - - time *= dev_priv->czclk_freq; - - /* Workload can be split between render + media, - * e.g. SwapBuffers being blitted in X after being rendered in - * mesa. To account for this we need to combine both engines - * into our activity counter. - */ - render = now.render_c0 - prev->render_c0; - media = now.media_c0 - prev->media_c0; - c0 = max(render, media); - c0 *= 1000 * 100 << 8; /* to usecs and scale to threshold% */ - - if (c0 > time * rps->power.up_threshold) - events = GEN6_PM_RP_UP_THRESHOLD; - else if (c0 < time * rps->power.down_threshold) - events = GEN6_PM_RP_DOWN_THRESHOLD; - } - - rps->ei = now; - return events; -} - -static void gen6_pm_rps_work(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, gt_pm.rps.work); - struct intel_gt *gt = &dev_priv->gt; - struct intel_rps *rps = &dev_priv->gt_pm.rps; - bool client_boost = false; - int new_delay, adj, min, max; - u32 pm_iir = 0; - - spin_lock_irq(>->irq_lock); - if (rps->interrupts_enabled) { - pm_iir = fetch_and_zero(&rps->pm_iir); - client_boost = atomic_read(&rps->num_waiters); - } - spin_unlock_irq(>->irq_lock); - - /* Make sure we didn't queue anything we're not going to process. 
*/ - WARN_ON(pm_iir & ~dev_priv->pm_rps_events); - if ((pm_iir & dev_priv->pm_rps_events) == 0 && !client_boost) - goto out; - - mutex_lock(&rps->lock); - - pm_iir |= vlv_wa_c0_ei(dev_priv, pm_iir); - - adj = rps->last_adj; - new_delay = rps->cur_freq; - min = rps->min_freq_softlimit; - max = rps->max_freq_softlimit; - if (client_boost) - max = rps->max_freq; - if (client_boost && new_delay < rps->boost_freq) { - new_delay = rps->boost_freq; - adj = 0; - } else if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { - if (adj > 0) - adj *= 2; - else /* CHV needs even encode values */ - adj = IS_CHERRYVIEW(dev_priv) ? 2 : 1; - - if (new_delay >= rps->max_freq_softlimit) - adj = 0; - } else if (client_boost) { - adj = 0; - } else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) { - if (rps->cur_freq > rps->efficient_freq) - new_delay = rps->efficient_freq; - else if (rps->cur_freq > rps->min_freq_softlimit) - new_delay = rps->min_freq_softlimit; - adj = 0; - } else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) { - if (adj < 0) - adj *= 2; - else /* CHV needs even encode values */ - adj = IS_CHERRYVIEW(dev_priv) ? -2 : -1; - - if (new_delay <= rps->min_freq_softlimit) - adj = 0; - } else { /* unknown event */ - adj = 0; - } - - rps->last_adj = adj; - - /* - * Limit deboosting and boosting to keep ourselves at the extremes - * when in the respective power modes (i.e. slowly decrease frequencies - * while in the HIGH_POWER zone and slowly increase frequencies while - * in the LOW_POWER zone). On idle, we will hit the timeout and drop - * to the next level quickly, and conversely if busy we expect to - * hit a waitboost and rapidly switch into max power. - */ - if ((adj < 0 && rps->power.mode == HIGH_POWER) || - (adj > 0 && rps->power.mode == LOW_POWER)) - rps->last_adj = 0; - - /* sysfs frequency interfaces may have snuck in while servicing the - * interrupt - */ - new_delay += adj; - new_delay = clamp_t(int, new_delay, min, max); - - if (intel_set_rps(dev_priv, new_delay)) { - DRM_DEBUG_DRIVER("Failed to set new GPU frequency\n"); - rps->last_adj = 0; - } - - mutex_unlock(&rps->lock); - -out: - /* Make sure not to corrupt PMIMR state used by ringbuffer on GEN6 */ - spin_lock_irq(>->irq_lock); - if (rps->interrupts_enabled) - gen6_gt_pm_unmask_irq(gt, dev_priv->pm_rps_events); - spin_unlock_irq(>->irq_lock); -} - - /** * ivybridge_parity_work - Workqueue called when a parity error interrupt * occurred. 
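Aside on the hunks above: gen6_pm_rps_work() and the rest of the RPS interrupt plumbing are dropped from i915_irq.c, but the logic is not lost; the calls below now go through &dev_priv->gt.rps and the file gains a "gt/intel_rps.h" include earlier in this diff, so the worker moves under gt/. For readers skimming the removal, its core is an accelerate-and-clamp heuristic. The sketch below is illustrative only; rps_state and rps_adjust are invented names for this note, not the real interface:

	/*
	 * Simplified model of the removed gen6_pm_rps_work() adjustment step:
	 * grow the step size while consecutive events keep pushing in the same
	 * direction, then clamp the requested frequency to the softlimits.
	 */
	struct rps_state {
		int cur, min, max;	/* current frequency and its softlimits */
		int last_adj;		/* previous step, reused to accelerate */
	};

	static int rps_adjust(struct rps_state *rps, bool up_event, bool down_event)
	{
		int adj = rps->last_adj;

		if (up_event)			/* busyness above the up threshold */
			adj = adj > 0 ? adj * 2 : 1;
		else if (down_event)		/* down threshold or idle timeout */
			adj = adj < 0 ? adj * 2 : -1;
		else				/* boost, unknown event, ... */
			adj = 0;

		rps->last_adj = adj;

		rps->cur += adj;
		if (rps->cur > rps->max)
			rps->cur = rps->max;
		else if (rps->cur < rps->min)
			rps->cur = rps->min;

		return rps->cur;
	}

The removed code additionally forces even step values on Cherryview and zeroes last_adj when an adjustment runs against the current power zone (deboosting in HIGH_POWER, boosting in LOW_POWER); those details are omitted above.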
@@ -1401,11 +1028,11 @@ static bool icp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { case HPD_PORT_A: - return val & ICP_DDIA_HPD_LONG_DETECT; + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_A); case HPD_PORT_B: - return val & ICP_DDIB_HPD_LONG_DETECT; + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_B); case HPD_PORT_C: - return val & TGP_DDIC_HPD_LONG_DETECT; + return val & SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(PORT_C); default: return false; } @@ -1427,20 +1054,6 @@ static bool icp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) } } -static bool tgp_ddi_port_hotplug_long_detect(enum hpd_pin pin, u32 val) -{ - switch (pin) { - case HPD_PORT_A: - return val & ICP_DDIA_HPD_LONG_DETECT; - case HPD_PORT_B: - return val & ICP_DDIB_HPD_LONG_DETECT; - case HPD_PORT_C: - return val & TGP_DDIC_HPD_LONG_DETECT; - default: - return false; - } -} - static bool tgp_tc_port_hotplug_long_detect(enum hpd_pin pin, u32 val) { switch (pin) { @@ -1652,54 +1265,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_i915_private *dev_priv, res1, res2); } -/* The RPS events need forcewake, so we add them to a work queue and mask their - * IMR bits until the work is done. Other interrupts can be processed without - * the work queue. */ -void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir) -{ - struct drm_i915_private *i915 = gt->i915; - struct intel_rps *rps = &i915->gt_pm.rps; - const u32 events = i915->pm_rps_events & pm_iir; - - lockdep_assert_held(>->irq_lock); - - if (unlikely(!events)) - return; - - gen6_gt_pm_mask_irq(gt, events); - - if (!rps->interrupts_enabled) - return; - - rps->pm_iir |= events; - schedule_work(&rps->work); -} - -void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - struct intel_gt *gt = &dev_priv->gt; - - if (pm_iir & dev_priv->pm_rps_events) { - spin_lock(>->irq_lock); - gen6_gt_pm_mask_irq(gt, pm_iir & dev_priv->pm_rps_events); - if (rps->interrupts_enabled) { - rps->pm_iir |= pm_iir & dev_priv->pm_rps_events; - schedule_work(&rps->work); - } - spin_unlock(>->irq_lock); - } - - if (INTEL_GEN(dev_priv) >= 8) - return; - - if (pm_iir & PM_VEBOX_USER_INTERRUPT) - intel_engine_breadcrumbs_irq(dev_priv->engine[VECS0]); - - if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) - DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir); -} - static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) { enum pipe pipe; @@ -1716,7 +1281,7 @@ static void i9xx_pipestat_irq_reset(struct drm_i915_private *dev_priv) static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, u32 iir, u32 pipe_stats[I915_MAX_PIPES]) { - int pipe; + enum pipe pipe; spin_lock(&dev_priv->irq_lock); @@ -1741,6 +1306,7 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv, status_mask = PIPE_FIFO_UNDERRUN_STATUS; switch (pipe) { + default: case PIPE_A: iir_bit = I915_DISPLAY_PIPE_A_EVENT_INTERRUPT; break; @@ -2009,7 +1575,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg) if (gt_iir) gen6_gt_irq_handler(&dev_priv->gt, gt_iir); if (pm_iir) - gen6_rps_irq_handler(dev_priv, pm_iir); + gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir); if (hotplug_status) i9xx_hpd_irq_handler(dev_priv, hotplug_status); @@ -2136,7 +1702,7 @@ static void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - int pipe; + enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; 
ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_ibx); @@ -2222,7 +1788,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv) static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - int pipe; + enum pipe pipe; u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; ibx_hpd_irq_handler(dev_priv, hotplug_trigger, hpd_cpt); @@ -2256,19 +1822,35 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) cpt_serr_int_handler(dev_priv); } -static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir, - const u32 *pins) +static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) { - u32 ddi_hotplug_trigger; - u32 tc_hotplug_trigger; + u32 ddi_hotplug_trigger, tc_hotplug_trigger; u32 pin_mask = 0, long_mask = 0; + bool (*tc_port_hotplug_long_detect)(enum hpd_pin pin, u32 val); + const u32 *pins; - if (HAS_PCH_MCC(dev_priv)) { + if (HAS_PCH_TGP(dev_priv)) { + ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; + tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP; + tc_port_hotplug_long_detect = tgp_tc_port_hotplug_long_detect; + pins = hpd_tgp; + } else if (HAS_PCH_JSP(dev_priv)) { ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; tc_hotplug_trigger = 0; + pins = hpd_tgp; + } else if (HAS_PCH_MCC(dev_priv)) { + ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; + tc_hotplug_trigger = pch_iir & SDE_TC_HOTPLUG_ICP(PORT_TC1); + tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect; + pins = hpd_icp; } else { + WARN(!HAS_PCH_ICP(dev_priv), + "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv)); + ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP; tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP; + tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect; + pins = hpd_icp; } if (ddi_hotplug_trigger) { @@ -2292,44 +1874,7 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir, intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, tc_hotplug_trigger, dig_hotplug_reg, pins, - icp_tc_port_hotplug_long_detect); - } - - if (pin_mask) - intel_hpd_irq_handler(dev_priv, pin_mask, long_mask); - - if (pch_iir & SDE_GMBUS_ICP) - gmbus_irq_handler(dev_priv); -} - -static void tgp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir) -{ - u32 ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_TGP; - u32 tc_hotplug_trigger = pch_iir & SDE_TC_MASK_TGP; - u32 pin_mask = 0, long_mask = 0; - - if (ddi_hotplug_trigger) { - u32 dig_hotplug_reg; - - dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_DDI); - I915_WRITE(SHOTPLUG_CTL_DDI, dig_hotplug_reg); - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - ddi_hotplug_trigger, - dig_hotplug_reg, hpd_tgp, - tgp_ddi_port_hotplug_long_detect); - } - - if (tc_hotplug_trigger) { - u32 dig_hotplug_reg; - - dig_hotplug_reg = I915_READ(SHOTPLUG_CTL_TC); - I915_WRITE(SHOTPLUG_CTL_TC, dig_hotplug_reg); - - intel_get_hpd_pins(dev_priv, &pin_mask, &long_mask, - tc_hotplug_trigger, - dig_hotplug_reg, hpd_tgp, - tgp_tc_port_hotplug_long_detect); + tc_port_hotplug_long_detect); } if (pin_mask) @@ -2434,7 +1979,7 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv, } if (IS_GEN(dev_priv, 5) && de_iir & DE_PCU_EVENT) - ironlake_rps_change_irq_handler(dev_priv); + gen5_rps_irq_handler(&dev_priv->gt.rps); } static void ivb_display_irq_handler(struct drm_i915_private *dev_priv, @@ -2539,7 +2084,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg) if (pm_iir) { I915_WRITE(GEN6_PMIIR, pm_iir); ret = IRQ_HANDLED; - gen6_rps_irq_handler(dev_priv, 
pm_iir); + gen6_rps_irq_handler(&dev_priv->gt.rps, pm_iir); } } @@ -2616,10 +2161,16 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) u32 mask; if (INTEL_GEN(dev_priv) >= 12) - /* TODO: Add AUX entries for USBC */ return TGL_DE_PORT_AUX_DDIA | TGL_DE_PORT_AUX_DDIB | - TGL_DE_PORT_AUX_DDIC; + TGL_DE_PORT_AUX_DDIC | + TGL_DE_PORT_AUX_USBC1 | + TGL_DE_PORT_AUX_USBC2 | + TGL_DE_PORT_AUX_USBC3 | + TGL_DE_PORT_AUX_USBC4 | + TGL_DE_PORT_AUX_USBC5 | + TGL_DE_PORT_AUX_USBC6; + mask = GEN8_AUX_CHANNEL_A; if (INTEL_GEN(dev_priv) >= 9) @@ -2638,7 +2189,9 @@ static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv) static u32 gen8_de_pipe_fault_mask(struct drm_i915_private *dev_priv) { - if (INTEL_GEN(dev_priv) >= 9) + if (INTEL_GEN(dev_priv) >= 11) + return GEN11_DE_PIPE_IRQ_FAULT_ERRORS; + else if (INTEL_GEN(dev_priv) >= 9) return GEN9_DE_PIPE_IRQ_FAULT_ERRORS; else return GEN8_DE_PIPE_IRQ_FAULT_ERRORS; @@ -2655,11 +2208,21 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir) } if (iir & GEN8_DE_EDP_PSR) { - u32 psr_iir = I915_READ(EDP_PSR_IIR); + u32 psr_iir; + i915_reg_t iir_reg; + + if (INTEL_GEN(dev_priv) >= 12) + iir_reg = TRANS_PSR_IIR(dev_priv->psr.transcoder); + else + iir_reg = EDP_PSR_IIR; + + psr_iir = I915_READ(iir_reg); + I915_WRITE(iir_reg, psr_iir); + + if (psr_iir) + found = true; intel_psr_irq_handler(dev_priv, psr_iir); - I915_WRITE(EDP_PSR_IIR, psr_iir); - found = true; } if (!found) @@ -2780,12 +2343,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl) I915_WRITE(SDEIIR, iir); ret = IRQ_HANDLED; - if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP) - tgp_irq_handler(dev_priv, iir); - else if (INTEL_PCH_TYPE(dev_priv) >= PCH_MCC) - icp_irq_handler(dev_priv, iir, hpd_mcc); - else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_irq_handler(dev_priv, iir, hpd_icp); + if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) + icp_irq_handler(dev_priv, iir); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_SPT) spt_irq_handler(dev_priv, iir); else @@ -2894,9 +2453,11 @@ static inline void gen11_master_intr_enable(void __iomem * const regs) raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ); } -static irqreturn_t gen11_irq_handler(int irq, void *arg) +static __always_inline irqreturn_t +__gen11_irq_handler(struct drm_i915_private * const i915, + u32 (*intr_disable)(void __iomem * const regs), + void (*intr_enable)(void __iomem * const regs)) { - struct drm_i915_private * const i915 = arg; void __iomem * const regs = i915->uncore.regs; struct intel_gt *gt = &i915->gt; u32 master_ctl; @@ -2905,9 +2466,9 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) if (!intel_irqs_enabled(i915)) return IRQ_NONE; - master_ctl = gen11_master_intr_disable(regs); + master_ctl = intr_disable(regs); if (!master_ctl) { - gen11_master_intr_enable(regs); + intr_enable(regs); return IRQ_NONE; } @@ -2929,13 +2490,20 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg) gu_misc_iir = gen11_gu_misc_irq_ack(gt, master_ctl); - gen11_master_intr_enable(regs); + intr_enable(regs); gen11_gu_misc_irq_handler(gt, gu_misc_iir); return IRQ_HANDLED; } +static irqreturn_t gen11_irq_handler(int irq, void *arg) +{ + return __gen11_irq_handler(arg, + gen11_master_intr_disable, + gen11_master_intr_enable); +} + /* Called from drm generic code, passed 'crtc' which * we use as a pipe index */ @@ -2952,12 +2520,18 @@ int i8xx_enable_vblank(struct drm_crtc *crtc) return 0; } -int i945gm_enable_vblank(struct drm_crtc *crtc) +int i915gm_enable_vblank(struct drm_crtc *crtc) { 
struct drm_i915_private *dev_priv = to_i915(crtc->dev); - if (dev_priv->i945gm_vblank.enabled++ == 0) - schedule_work(&dev_priv->i945gm_vblank.work); + /* + * Vblank interrupts fail to wake the device up from C2+. + * Disabling render clock gating during C-states avoids + * the problem. There is a small power cost so we do this + * only when vblank interrupts are actually enabled. + */ + if (dev_priv->vblank_enabled++ == 0) + I915_WRITE(SCPD0, _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); return i8xx_enable_vblank(crtc); } @@ -3030,14 +2604,14 @@ void i8xx_disable_vblank(struct drm_crtc *crtc) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -void i945gm_disable_vblank(struct drm_crtc *crtc) +void i915gm_disable_vblank(struct drm_crtc *crtc) { struct drm_i915_private *dev_priv = to_i915(crtc->dev); i8xx_disable_vblank(crtc); - if (--dev_priv->i945gm_vblank.enabled == 0) - schedule_work(&dev_priv->i945gm_vblank.work); + if (--dev_priv->vblank_enabled == 0) + I915_WRITE(SCPD0, _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE)); } void i965_disable_vblank(struct drm_crtc *crtc) @@ -3076,60 +2650,6 @@ void bdw_disable_vblank(struct drm_crtc *crtc) spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); } -static void i945gm_vblank_work_func(struct work_struct *work) -{ - struct drm_i915_private *dev_priv = - container_of(work, struct drm_i915_private, i945gm_vblank.work); - - /* - * Vblank interrupts fail to wake up the device from C3, - * hence we want to prevent C3 usage while vblank interrupts - * are enabled. - */ - pm_qos_update_request(&dev_priv->i945gm_vblank.pm_qos, - READ_ONCE(dev_priv->i945gm_vblank.enabled) ? - dev_priv->i945gm_vblank.c3_disable_latency : - PM_QOS_DEFAULT_VALUE); -} - -static int cstate_disable_latency(const char *name) -{ - const struct cpuidle_driver *drv; - int i; - - drv = cpuidle_get_driver(); - if (!drv) - return 0; - - for (i = 0; i < drv->state_count; i++) { - const struct cpuidle_state *state = &drv->states[i]; - - if (!strcmp(state->name, name)) - return state->exit_latency ? 
- state->exit_latency - 1 : 0; - } - - return 0; -} - -static void i945gm_vblank_work_init(struct drm_i915_private *dev_priv) -{ - INIT_WORK(&dev_priv->i945gm_vblank.work, - i945gm_vblank_work_func); - - dev_priv->i945gm_vblank.c3_disable_latency = - cstate_disable_latency("C3"); - pm_qos_add_request(&dev_priv->i945gm_vblank.pm_qos, - PM_QOS_CPU_DMA_LATENCY, - PM_QOS_DEFAULT_VALUE); -} - -static void i945gm_vblank_work_fini(struct drm_i915_private *dev_priv) -{ - cancel_work_sync(&dev_priv->i945gm_vblank.work); - pm_qos_remove_request(&dev_priv->i945gm_vblank.pm_qos); -} - static void ibx_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; @@ -3246,7 +2766,7 @@ static void valleyview_irq_reset(struct drm_i915_private *dev_priv) static void gen8_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - int pipe; + enum pipe pipe; gen8_master_intr_disable(dev_priv->uncore.regs); @@ -3271,7 +2791,7 @@ static void gen8_irq_reset(struct drm_i915_private *dev_priv) static void gen11_irq_reset(struct drm_i915_private *dev_priv) { struct intel_uncore *uncore = &dev_priv->uncore; - int pipe; + enum pipe pipe; gen11_master_intr_disable(dev_priv->uncore.regs); @@ -3279,8 +2799,23 @@ static void gen11_irq_reset(struct drm_i915_private *dev_priv) intel_uncore_write(uncore, GEN11_DISPLAY_INT_CTL, 0); - intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); - intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); + if (INTEL_GEN(dev_priv) >= 12) { + enum transcoder trans; + + for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) { + enum intel_display_power_domain domain; + + domain = POWER_DOMAIN_TRANSCODER(trans); + if (!intel_display_power_is_enabled(dev_priv, domain)) + continue; + + intel_uncore_write(uncore, TRANS_PSR_IMR(trans), 0xffffffff); + intel_uncore_write(uncore, TRANS_PSR_IIR(trans), 0xffffffff); + } + } else { + intel_uncore_write(uncore, EDP_PSR_IMR, 0xffffffff); + intel_uncore_write(uncore, EDP_PSR_IIR, 0xffffffff); + } for_each_pipe(dev_priv, pipe) if (intel_display_power_is_enabled(dev_priv, @@ -3431,42 +2966,44 @@ static void icp_hpd_detection_setup(struct drm_i915_private *dev_priv, } } -static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv) +static void icp_hpd_irq_setup(struct drm_i915_private *dev_priv, + u32 sde_ddi_mask, u32 sde_tc_mask, + u32 ddi_enable_mask, u32 tc_enable_mask, + const u32 *pins) { u32 hotplug_irqs, enabled_irqs; - hotplug_irqs = SDE_DDI_MASK_ICP | SDE_TC_MASK_ICP; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_icp); + hotplug_irqs = sde_ddi_mask | sde_tc_mask; + enabled_irqs = intel_hpd_enabled_irqs(dev_priv, pins); ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, - ICP_TC_HPD_ENABLE_MASK); + icp_hpd_detection_setup(dev_priv, ddi_enable_mask, tc_enable_mask); } +/* + * EHL doesn't need most of gen11_hpd_irq_setup, it's handling only the + * equivalent of SDE. 
+ */ static void mcc_hpd_irq_setup(struct drm_i915_private *dev_priv) { - u32 hotplug_irqs, enabled_irqs; - - hotplug_irqs = SDE_DDI_MASK_TGP; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_mcc); - - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - - icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0); + icp_hpd_irq_setup(dev_priv, + SDE_DDI_MASK_ICP, SDE_TC_HOTPLUG_ICP(PORT_TC1), + ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE(PORT_TC1), + hpd_icp); } -static void tgp_hpd_irq_setup(struct drm_i915_private *dev_priv) +/* + * JSP behaves exactly the same as MCC above except that port C is mapped to + * the DDI-C pins instead of the TC1 pins. This means we should follow TGP's + * masks & tables rather than ICP's masks & tables. + */ +static void jsp_hpd_irq_setup(struct drm_i915_private *dev_priv) { - u32 hotplug_irqs, enabled_irqs; - - hotplug_irqs = SDE_DDI_MASK_TGP | SDE_TC_MASK_TGP; - enabled_irqs = intel_hpd_enabled_irqs(dev_priv, hpd_tgp); - - ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); - - icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, - TGP_TC_HPD_ENABLE_MASK); + icp_hpd_irq_setup(dev_priv, + SDE_DDI_MASK_TGP, 0, + TGP_DDI_HPD_ENABLE_MASK, 0, + hpd_tgp); } static void gen11_hpd_detection_setup(struct drm_i915_private *dev_priv) @@ -3506,9 +3043,13 @@ static void gen11_hpd_irq_setup(struct drm_i915_private *dev_priv) gen11_hpd_detection_setup(dev_priv); if (INTEL_PCH_TYPE(dev_priv) >= PCH_TGP) - tgp_hpd_irq_setup(dev_priv); + icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_TGP, SDE_TC_MASK_TGP, + TGP_DDI_HPD_ENABLE_MASK, + TGP_TC_HPD_ENABLE_MASK, hpd_tgp); else if (INTEL_PCH_TYPE(dev_priv) >= PCH_ICP) - icp_hpd_irq_setup(dev_priv); + icp_hpd_irq_setup(dev_priv, SDE_DDI_MASK_ICP, SDE_TC_MASK_ICP, + ICP_DDI_HPD_ENABLE_MASK, + ICP_TC_HPD_ENABLE_MASK, hpd_icp); } static void spt_hpd_detection_setup(struct drm_i915_private *dev_priv) @@ -3684,7 +3225,6 @@ static void ironlake_irq_postinstall(struct drm_i915_private *dev_priv) if (IS_HASWELL(dev_priv)) { gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); - intel_psr_irq_control(dev_priv, dev_priv->psr.debug); display_mask |= DE_EDP_PSR_INT_HSW; } @@ -3794,8 +3334,21 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv) else if (IS_BROADWELL(dev_priv)) de_port_enables |= GEN8_PORT_DP_A_HOTPLUG; - gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); - intel_psr_irq_control(dev_priv, dev_priv->psr.debug); + if (INTEL_GEN(dev_priv) >= 12) { + enum transcoder trans; + + for (trans = TRANSCODER_A; trans <= TRANSCODER_D; trans++) { + enum intel_display_power_domain domain; + + domain = POWER_DOMAIN_TRANSCODER(trans); + if (!intel_display_power_is_enabled(dev_priv, domain)) + continue; + + gen3_assert_iir_is_zero(uncore, TRANS_PSR_IIR(trans)); + } + } else { + gen3_assert_iir_is_zero(uncore, EDP_PSR_IIR); + } for_each_pipe(dev_priv, pipe) { dev_priv->de_irq_mask[pipe] = ~de_pipe_masked; @@ -3853,8 +3406,11 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv) if (HAS_PCH_TGP(dev_priv)) icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, TGP_TC_HPD_ENABLE_MASK); - else if (HAS_PCH_MCC(dev_priv)) + else if (HAS_PCH_JSP(dev_priv)) icp_hpd_detection_setup(dev_priv, TGP_DDI_HPD_ENABLE_MASK, 0); + else if (HAS_PCH_MCC(dev_priv)) + icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, + ICP_TC_HPD_ENABLE(PORT_TC1)); else icp_hpd_detection_setup(dev_priv, ICP_DDI_HPD_ENABLE_MASK, ICP_TC_HPD_ENABLE_MASK); @@ -4317,16 +3873,10 @@ static irqreturn_t 
i965_irq_handler(int irq, void *arg) void intel_irq_init(struct drm_i915_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; - struct intel_rps *rps = &dev_priv->gt_pm.rps; int i; - if (IS_I945GM(dev_priv)) - i945gm_vblank_work_init(dev_priv); - intel_hpd_init_work(dev_priv); - INIT_WORK(&rps->work, gen6_pm_rps_work); - INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); for (i = 0; i < MAX_L3_SLICES; ++i) dev_priv->l3_parity.remap_info[i] = NULL; @@ -4335,33 +3885,6 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (HAS_GT_UC(dev_priv) && INTEL_GEN(dev_priv) < 11) dev_priv->gt.pm_guc_events = GUC_INTR_GUC2HOST << 16; - /* Let's track the enabled rps events */ - if (IS_VALLEYVIEW(dev_priv)) - /* WaGsvRC0ResidencyMethod:vlv */ - dev_priv->pm_rps_events = GEN6_PM_RP_UP_EI_EXPIRED; - else - dev_priv->pm_rps_events = (GEN6_PM_RP_UP_THRESHOLD | - GEN6_PM_RP_DOWN_THRESHOLD | - GEN6_PM_RP_DOWN_TIMEOUT); - - /* We share the register with other engine */ - if (INTEL_GEN(dev_priv) > 9) - GEM_WARN_ON(dev_priv->pm_rps_events & 0xffff0000); - - rps->pm_intrmsk_mbz = 0; - - /* - * SNB,IVB,HSW can while VLV,CHV may hard hang on looping batchbuffer - * if GEN6_PM_UP_EI_EXPIRED is masked. - * - * TODO: verify if this can be reproduced on VLV,CHV. - */ - if (INTEL_GEN(dev_priv) <= 7) - rps->pm_intrmsk_mbz |= GEN6_PM_RP_UP_EI_EXPIRED; - - if (INTEL_GEN(dev_priv) >= 8) - rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC; - dev->vblank_disable_immediate = true; /* Most platforms treat the display irq block as an always-on @@ -4387,8 +3910,9 @@ void intel_irq_init(struct drm_i915_private *dev_priv) if (I915_HAS_HOTPLUG(dev_priv)) dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; } else { - if (HAS_PCH_MCC(dev_priv)) - /* EHL doesn't need most of gen11_hpd_irq_setup */ + if (HAS_PCH_JSP(dev_priv)) + dev_priv->display.hpd_irq_setup = jsp_hpd_irq_setup; + else if (HAS_PCH_MCC(dev_priv)) dev_priv->display.hpd_irq_setup = mcc_hpd_irq_setup; else if (INTEL_GEN(dev_priv) >= 11) dev_priv->display.hpd_irq_setup = gen11_hpd_irq_setup; @@ -4411,9 +3935,6 @@ void intel_irq_fini(struct drm_i915_private *i915) { int i; - if (IS_I945GM(i915)) - i945gm_vblank_work_fini(i915); - for (i = 0; i < MAX_L3_SLICES; ++i) kfree(i915->l3_parity.remap_info[i]); } @@ -4538,10 +4059,10 @@ void intel_irq_uninstall(struct drm_i915_private *dev_priv) int irq = dev_priv->drm.pdev->irq; /* - * FIXME we can get called twice during driver load - * error handling due to intel_modeset_cleanup() - * calling us out of sequence. Would be nice if - * it didn't do that... + * FIXME we can get called twice during driver probe + * error handling as well as during driver remove due to + * intel_modeset_driver_remove() calling us out of sequence. + * Would be nice if it didn't do that... 
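The new i915gm vblank hooks earlier in this hunk poke SCPD0 through i915's masked-bit register convention: the upper 16 bits of the written value select which bits of the lower 16 actually change, so a single write can flip one bit without a read-modify-write cycle. A minimal sketch of that idiom follows; the macro bodies are reconstructed from memory of i915_reg.h and should be read as illustrative rather than authoritative.

	/* upper half = write-enable mask, lower half = new bit values */
	#define EXAMPLE_MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
	#define EXAMPLE_MASKED_BIT_ENABLE(a)		EXAMPLE_MASKED_FIELD((a), (a))
	#define EXAMPLE_MASKED_BIT_DISABLE(a)		EXAMPLE_MASKED_FIELD((a), 0)

	/* e.g. keep render clock gating disabled while vblank irqs are enabled */
	I915_WRITE(SCPD0, EXAMPLE_MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));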
*/ if (!dev_priv->drm.irq_enabled) return; diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h index 8e7e6071777e..812c47a9c2d6 100644 --- a/drivers/gpu/drm/i915/i915_irq.h +++ b/drivers/gpu/drm/i915/i915_irq.h @@ -17,14 +17,8 @@ struct drm_device; struct drm_display_mode; struct drm_i915_private; struct intel_crtc; -struct intel_crtc; -struct intel_gt; -struct intel_guc; struct intel_uncore; -void gen11_rps_irq_handler(struct intel_gt *gt, u32 pm_iir); -void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir); - void intel_irq_init(struct drm_i915_private *dev_priv); void intel_irq_fini(struct drm_i915_private *dev_priv); int intel_irq_install(struct drm_i915_private *dev_priv); @@ -106,12 +100,6 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, u8 pipe_mask); void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, u8 pipe_mask); -void gen9_reset_guc_interrupts(struct intel_guc *guc); -void gen9_enable_guc_interrupts(struct intel_guc *guc); -void gen9_disable_guc_interrupts(struct intel_guc *guc); -void gen11_reset_guc_interrupts(struct intel_guc *guc); -void gen11_enable_guc_interrupts(struct intel_guc *guc); -void gen11_disable_guc_interrupts(struct intel_guc *guc); bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe, bool in_vblank_irq, int *vpos, int *hpos, @@ -122,12 +110,12 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc); u32 g4x_get_vblank_counter(struct drm_crtc *crtc); int i8xx_enable_vblank(struct drm_crtc *crtc); -int i945gm_enable_vblank(struct drm_crtc *crtc); +int i915gm_enable_vblank(struct drm_crtc *crtc); int i965_enable_vblank(struct drm_crtc *crtc); int ilk_enable_vblank(struct drm_crtc *crtc); int bdw_enable_vblank(struct drm_crtc *crtc); void i8xx_disable_vblank(struct drm_crtc *crtc); -void i945gm_disable_vblank(struct drm_crtc *crtc); +void i915gm_disable_vblank(struct drm_crtc *crtc); void i965_disable_vblank(struct drm_crtc *crtc); void ilk_disable_vblank(struct drm_crtc *crtc); void bdw_disable_vblank(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 296452f9efe4..1dd1f3652795 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -46,7 +46,8 @@ i915_param_named(modeset, int, 0400, i915_param_named_unsafe(enable_dc, int, 0400, "Enable power-saving display C-states. " - "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6)"); + "(-1=auto [default]; 0=disable; 1=up to DC5; 2=up to DC6; " + "3=up to DC5 with DC3CO; 4=up to DC6 with DC3CO)"); i915_param_named_unsafe(enable_fbc, int, 0600, "Enable frame buffer compression for power savings " @@ -165,7 +166,7 @@ i915_param_named_unsafe(enable_dp_mst, bool, 0600, "Enable multi-stream transport (MST) for new DisplayPort sinks. 
(default: true)"); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) -i915_param_named_unsafe(inject_load_failure, uint, 0400, +i915_param_named_unsafe(inject_probe_failure, uint, 0400, "Force an error after a number of failure check points (0:disabled (default), N:force failure at the Nth failure check point)"); #endif @@ -178,6 +179,11 @@ i915_param_named(enable_gvt, bool, 0400, "Enable support for Intel GVT-g graphics virtualization host support(default:false)"); #endif +#if IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM) +i915_param_named_unsafe(fake_lmem_start, ulong, 0600, + "Fake LMEM start offset (default: 0)"); +#endif + static __always_inline void _print_param(struct drm_printer *p, const char *name, const char *type, @@ -189,6 +195,8 @@ static __always_inline void _print_param(struct drm_printer *p, drm_printf(p, "i915.%s=%d\n", name, *(const int *)x); else if (!__builtin_strcmp(type, "unsigned int")) drm_printf(p, "i915.%s=%u\n", name, *(const unsigned int *)x); + else if (!__builtin_strcmp(type, "unsigned long")) + drm_printf(p, "i915.%s=%lu\n", name, *(const unsigned long *)x); else if (!__builtin_strcmp(type, "char *")) drm_printf(p, "i915.%s=%s\n", name, *(const char **)x); else diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h index d29ade3b7de6..31b88f297fbc 100644 --- a/drivers/gpu/drm/i915/i915_params.h +++ b/drivers/gpu/drm/i915/i915_params.h @@ -61,11 +61,12 @@ struct drm_printer; param(char *, dmc_firmware_path, NULL) \ param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \ param(int, edp_vswing, 0) \ - param(int, reset, 2) \ - param(unsigned int, inject_load_failure, 0) \ + param(int, reset, 3) \ + param(unsigned int, inject_probe_failure, 0) \ param(int, fastboot, -1) \ param(int, enable_dpcd_backlight, 0) \ param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \ + param(unsigned long, fake_lmem_start, 0) \ /* leave bools at the end to not create holes */ \ param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \ param(bool, enable_hangcheck, true) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 1974e4c78a43..1bb701d32a5d 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -23,7 +23,6 @@ */ #include <linux/console.h> -#include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include <drm/drm_drv.h> @@ -118,6 +117,14 @@ [PIPE_C] = IVB_CURSOR_C_OFFSET, \ } +#define TGL_CURSOR_OFFSETS \ + .cursor_offsets = { \ + [PIPE_A] = CURSOR_A_OFFSET, \ + [PIPE_B] = IVB_CURSOR_B_OFFSET, \ + [PIPE_C] = IVB_CURSOR_C_OFFSET, \ + [PIPE_D] = TGL_CURSOR_D_OFFSET, \ + } + #define I9XX_COLORS \ .color = { .gamma_lut_size = 256 } #define I965_COLORS \ @@ -144,10 +151,13 @@ #define GEN_DEFAULT_PAGE_SIZES \ .page_sizes = I915_GTT_PAGE_SIZE_4K +#define GEN_DEFAULT_REGIONS \ + .memory_regions = REGION_SMEM | REGION_STOLEN + #define I830_FEATURES \ GEN(2), \ .is_mobile = 1, \ - .num_pipes = 2, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .display.has_overlay = 1, \ .display.cursor_needs_physical = 1, \ .display.overlay_needs_physical = 1, \ @@ -161,11 +171,12 @@ I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ I9XX_COLORS, \ - GEN_DEFAULT_PAGE_SIZES + GEN_DEFAULT_PAGE_SIZES, \ + GEN_DEFAULT_REGIONS #define I845_FEATURES \ GEN(2), \ - .num_pipes = 1, \ + .pipe_mask = BIT(PIPE_A), \ .display.has_overlay = 1, \ .display.overlay_needs_physical = 1, \ .display.has_gmch = 1, \ @@ -178,7 +189,8 @@ I845_PIPE_OFFSETS, \ I845_CURSOR_OFFSETS, \ I9XX_COLORS, \ - GEN_DEFAULT_PAGE_SIZES 
+ GEN_DEFAULT_PAGE_SIZES, \ + GEN_DEFAULT_REGIONS static const struct intel_device_info intel_i830_info = { I830_FEATURES, @@ -203,7 +215,7 @@ static const struct intel_device_info intel_i865g_info = { #define GEN3_FEATURES \ GEN(3), \ - .num_pipes = 2, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ .engine_mask = BIT(RCS0), \ @@ -212,7 +224,8 @@ static const struct intel_device_info intel_i865g_info = { I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ I9XX_COLORS, \ - GEN_DEFAULT_PAGE_SIZES + GEN_DEFAULT_PAGE_SIZES, \ + GEN_DEFAULT_REGIONS static const struct intel_device_info intel_i915g_info = { GEN3_FEATURES, @@ -287,7 +300,7 @@ static const struct intel_device_info intel_pineview_m_info = { #define GEN4_FEATURES \ GEN(4), \ - .num_pipes = 2, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .display.has_hotplug = 1, \ .display.has_gmch = 1, \ .gpu_reset_clobbers_display = true, \ @@ -297,7 +310,8 @@ static const struct intel_device_info intel_pineview_m_info = { I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ I965_COLORS, \ - GEN_DEFAULT_PAGE_SIZES + GEN_DEFAULT_PAGE_SIZES, \ + GEN_DEFAULT_REGIONS static const struct intel_device_info intel_i965g_info = { GEN4_FEATURES, @@ -337,7 +351,7 @@ static const struct intel_device_info intel_gm45_info = { #define GEN5_FEATURES \ GEN(5), \ - .num_pipes = 2, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .display.has_hotplug = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0), \ .has_snoop = true, \ @@ -347,7 +361,8 @@ static const struct intel_device_info intel_gm45_info = { I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ ILK_COLORS, \ - GEN_DEFAULT_PAGE_SIZES + GEN_DEFAULT_PAGE_SIZES, \ + GEN_DEFAULT_REGIONS static const struct intel_device_info intel_ironlake_d_info = { GEN5_FEATURES, @@ -363,7 +378,7 @@ static const struct intel_device_info intel_ironlake_m_info = { #define GEN6_FEATURES \ GEN(6), \ - .num_pipes = 2, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ @@ -377,7 +392,8 @@ static const struct intel_device_info intel_ironlake_m_info = { I9XX_PIPE_OFFSETS, \ I9XX_CURSOR_OFFSETS, \ ILK_COLORS, \ - GEN_DEFAULT_PAGE_SIZES + GEN_DEFAULT_PAGE_SIZES, \ + GEN_DEFAULT_REGIONS #define SNB_D_PLATFORM \ GEN6_FEATURES, \ @@ -411,7 +427,7 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { #define GEN7_FEATURES \ GEN(7), \ - .num_pipes = 3, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ .display.has_hotplug = 1, \ .display.has_fbc = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ @@ -420,12 +436,13 @@ static const struct intel_device_info intel_sandybridge_m_gt2_info = { .has_rc6 = 1, \ .has_rc6p = 1, \ .has_rps = true, \ - .ppgtt_type = INTEL_PPGTT_FULL, \ + .ppgtt_type = INTEL_PPGTT_ALIASING, \ .ppgtt_size = 31, \ IVB_PIPE_OFFSETS, \ IVB_CURSOR_OFFSETS, \ IVB_COLORS, \ - GEN_DEFAULT_PAGE_SIZES + GEN_DEFAULT_PAGE_SIZES, \ + GEN_DEFAULT_REGIONS #define IVB_D_PLATFORM \ GEN7_FEATURES, \ @@ -462,7 +479,7 @@ static const struct intel_device_info intel_ivybridge_q_info = { GEN7_FEATURES, PLATFORM(INTEL_IVYBRIDGE), .gt = 2, - .num_pipes = 0, /* legal, last one wins */ + .pipe_mask = 0, /* legal, last one wins */ .has_l3_dpf = 1, }; @@ -470,13 +487,13 @@ static const struct intel_device_info intel_valleyview_info = { PLATFORM(INTEL_VALLEYVIEW), GEN(7), .is_lp = 1, - .num_pipes = 2, + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), .has_runtime_pm = 1, .has_rc6 = 1, .has_rps = 
true, .display.has_gmch = 1, .display.has_hotplug = 1, - .ppgtt_type = INTEL_PPGTT_FULL, + .ppgtt_type = INTEL_PPGTT_ALIASING, .ppgtt_size = 31, .has_snoop = true, .has_coherent_ggtt = false, @@ -486,6 +503,7 @@ static const struct intel_device_info intel_valleyview_info = { I9XX_CURSOR_OFFSETS, I965_COLORS, GEN_DEFAULT_PAGE_SIZES, + GEN_DEFAULT_REGIONS, }; #define G75_FEATURES \ @@ -560,7 +578,7 @@ static const struct intel_device_info intel_broadwell_gt3_info = { static const struct intel_device_info intel_cherryview_info = { PLATFORM(INTEL_CHERRYVIEW), GEN(8), - .num_pipes = 3, + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), .display.has_hotplug = 1, .is_lp = 1, .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), @@ -570,7 +588,7 @@ static const struct intel_device_info intel_cherryview_info = { .has_rps = true, .has_logical_ring_contexts = 1, .display.has_gmch = 1, - .ppgtt_type = INTEL_PPGTT_FULL, + .ppgtt_type = INTEL_PPGTT_ALIASING, .ppgtt_size = 32, .has_reset_engine = 1, .has_snoop = true, @@ -580,6 +598,7 @@ static const struct intel_device_info intel_cherryview_info = { CHV_CURSOR_OFFSETS, CHV_COLORS, GEN_DEFAULT_PAGE_SIZES, + GEN_DEFAULT_REGIONS, }; #define GEN9_DEFAULT_PAGE_SIZES \ @@ -593,6 +612,7 @@ static const struct intel_device_info intel_cherryview_info = { .has_logical_ring_preemption = 1, \ .display.has_csr = 1, \ .has_gt_uc = 1, \ + .display.has_hdcp = 1, \ .display.has_ipc = 1, \ .ddb_size = 896 @@ -631,11 +651,12 @@ static const struct intel_device_info intel_skylake_gt4_info = { .is_lp = 1, \ .display.has_hotplug = 1, \ .engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \ - .num_pipes = 3, \ + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ .has_64bit_reloc = 1, \ .display.has_ddi = 1, \ .has_fpga_dbg = 1, \ .display.has_fbc = 1, \ + .display.has_hdcp = 1, \ .display.has_psr = 1, \ .has_runtime_pm = 1, \ .display.has_csr = 1, \ @@ -654,7 +675,8 @@ static const struct intel_device_info intel_skylake_gt4_info = { HSW_PIPE_OFFSETS, \ IVB_CURSOR_OFFSETS, \ IVB_COLORS, \ - GEN9_DEFAULT_PAGE_SIZES + GEN9_DEFAULT_PAGE_SIZES, \ + GEN_DEFAULT_REGIONS static const struct intel_device_info intel_broxton_info = { GEN9_LP_FEATURES, @@ -715,6 +737,7 @@ static const struct intel_device_info intel_coffeelake_gt3_info = { GEN9_FEATURES, \ GEN(10), \ .ddb_size = 1024, \ + .display.has_dsc = 1, \ .has_coherent_ggtt = false, \ GLK_COLORS @@ -787,18 +810,25 @@ static const struct intel_device_info intel_elkhartlake_info = { [TRANSCODER_DSI_0] = TRANSCODER_DSI0_OFFSET, \ [TRANSCODER_DSI_1] = TRANSCODER_DSI1_OFFSET, \ }, \ - .has_global_mocs = 1 + TGL_CURSOR_OFFSETS, \ + .has_global_mocs = 1, \ + .display.has_dsb = 1 static const struct intel_device_info intel_tigerlake_12_info = { GEN12_FEATURES, PLATFORM(INTEL_TIGERLAKE), - .num_pipes = 4, + .pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D), .require_force_probe = 1, .display.has_modular_fia = 1, .engine_mask = BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2), + .has_rps = false, /* XXX disabled for debugging */ }; +#define GEN12_DGFX_FEATURES \ + GEN12_FEATURES, \ + .is_dgfx = 1 + #undef GEN #undef PLATFORM diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index e42b86827d6b..65d7c2e599de 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -196,8 +196,11 @@ #include <linux/uuid.h> #include "gem/i915_gem_context.h" -#include "gem/i915_gem_pm.h" +#include "gt/intel_engine_pm.h" +#include 
"gt/intel_engine_user.h" +#include "gt/intel_gt.h" #include "gt/intel_lrc_reg.h" +#include "gt/intel_ring.h" #include "i915_drv.h" #include "i915_perf.h" @@ -215,6 +218,7 @@ #include "oa/i915_oa_cflgt3.h" #include "oa/i915_oa_cnl.h" #include "oa/i915_oa_icl.h" +#include "oa/i915_oa_tgl.h" /* HW requires this to be a power of two, between 128k and 16M, though driver * is currently generally designed assuming the largest 16M size is used such @@ -291,6 +295,7 @@ static u32 i915_perf_stream_paranoid = true; /* On Gen8+ automatically triggered OA reports include a 'reason' field... */ #define OAREPORT_REASON_MASK 0x3f +#define OAREPORT_REASON_MASK_EXTENDED 0x7f #define OAREPORT_REASON_SHIFT 19 #define OAREPORT_REASON_TIMER (1<<0) #define OAREPORT_REASON_CTX_SWITCH (1<<3) @@ -336,17 +341,24 @@ static const struct i915_oa_format gen8_plus_oa_formats[I915_OA_FORMAT_MAX] = { [I915_OA_FORMAT_C4_B8] = { 7, 64 }, }; +static const struct i915_oa_format gen12_oa_formats[I915_OA_FORMAT_MAX] = { + [I915_OA_FORMAT_A32u40_A4u32_B8_C8] = { 5, 256 }, +}; + #define SAMPLE_OA_REPORT (1<<0) /** * struct perf_open_properties - for validated properties given to open a stream * @sample_flags: `DRM_I915_PERF_PROP_SAMPLE_*` properties are tracked as flags * @single_context: Whether a single or all gpu contexts should be monitored + * @hold_preemption: Whether the preemption is disabled for the filtered + * context * @ctx_handle: A gem ctx handle for use with @single_context * @metrics_set: An ID for an OA unit metric set advertised via sysfs * @oa_format: An OA unit HW report format * @oa_periodic: Whether to enable periodic OA unit sampling * @oa_period_exponent: The OA unit sampling period is derived from this + * @engine: The engine (typically rcs0) being monitored by the OA unit * * As read_properties_unlocked() enumerates and validates the properties given * to open a stream of metrics the configuration is built up in the structure @@ -356,6 +368,7 @@ struct perf_open_properties { u32 sample_flags; u64 single_context:1; + u64 hold_preemption:1; u64 ctx_handle; /* OA sampling state */ @@ -363,69 +376,74 @@ struct perf_open_properties { int oa_format; bool oa_periodic; int oa_period_exponent; + + struct intel_engine_cs *engine; +}; + +struct i915_oa_config_bo { + struct llist_node node; + + struct i915_oa_config *oa_config; + struct i915_vma *vma; }; static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer); -static void free_oa_config(struct drm_i915_private *dev_priv, - struct i915_oa_config *oa_config) +void i915_oa_config_release(struct kref *ref) { - if (!PTR_ERR(oa_config->flex_regs)) - kfree(oa_config->flex_regs); - if (!PTR_ERR(oa_config->b_counter_regs)) - kfree(oa_config->b_counter_regs); - if (!PTR_ERR(oa_config->mux_regs)) - kfree(oa_config->mux_regs); - kfree(oa_config); -} + struct i915_oa_config *oa_config = + container_of(ref, typeof(*oa_config), ref); -static void put_oa_config(struct drm_i915_private *dev_priv, - struct i915_oa_config *oa_config) -{ - if (!atomic_dec_and_test(&oa_config->ref_count)) - return; + kfree(oa_config->flex_regs); + kfree(oa_config->b_counter_regs); + kfree(oa_config->mux_regs); - free_oa_config(dev_priv, oa_config); + kfree_rcu(oa_config, rcu); } -static int get_oa_config(struct drm_i915_private *dev_priv, - int metrics_set, - struct i915_oa_config **out_config) +struct i915_oa_config * +i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set) { - int ret; + struct i915_oa_config *oa_config; - if (metrics_set == 1) { - *out_config = 
&dev_priv->perf.test_config; - atomic_inc(&dev_priv->perf.test_config.ref_count); - return 0; - } + rcu_read_lock(); + if (metrics_set == 1) + oa_config = &perf->test_config; + else + oa_config = idr_find(&perf->metrics_idr, metrics_set); + if (oa_config) + oa_config = i915_oa_config_get(oa_config); + rcu_read_unlock(); - ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock); - if (ret) - return ret; + return oa_config; +} - *out_config = idr_find(&dev_priv->perf.metrics_idr, metrics_set); - if (!*out_config) - ret = -EINVAL; - else - atomic_inc(&(*out_config)->ref_count); +static void free_oa_config_bo(struct i915_oa_config_bo *oa_bo) +{ + i915_oa_config_put(oa_bo->oa_config); + i915_vma_put(oa_bo->vma); + kfree(oa_bo); +} - mutex_unlock(&dev_priv->perf.metrics_lock); +static u32 gen12_oa_hw_tail_read(struct i915_perf_stream *stream) +{ + struct intel_uncore *uncore = stream->uncore; - return ret; + return intel_uncore_read(uncore, GEN12_OAG_OATAILPTR) & + GEN12_OAG_OATAILPTR_MASK; } static u32 gen8_oa_hw_tail_read(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = stream->uncore; - return I915_READ(GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK; + return intel_uncore_read(uncore, GEN8_OATAILPTR) & GEN8_OATAILPTR_MASK; } static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; - u32 oastatus1 = I915_READ(GEN7_OASTATUS1); + struct intel_uncore *uncore = stream->uncore; + u32 oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1); return oastatus1 & GEN7_OASTATUS1_TAIL_MASK; } @@ -456,7 +474,6 @@ static u32 gen7_oa_hw_tail_read(struct i915_perf_stream *stream) */ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; int report_size = stream->oa_buffer.format_size; unsigned long flags; unsigned int aged_idx; @@ -479,7 +496,7 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream) aged_tail = stream->oa_buffer.tails[aged_idx].offset; aging_tail = stream->oa_buffer.tails[!aged_idx].offset; - hw_tail = dev_priv->perf.ops.oa_hw_tail_read(stream); + hw_tail = stream->perf->ops.oa_hw_tail_read(stream); /* The tail pointer increases in 64 byte increments, * not in report_size steps... @@ -536,7 +553,7 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream) aging_tail = hw_tail; stream->oa_buffer.aging_timestamp = now; } else { - DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %u\n", + DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n", hw_tail); } } @@ -655,7 +672,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, size_t count, size_t *offset) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = stream->uncore; int report_size = stream->oa_buffer.format_size; u8 *oa_buf_base = stream->oa_buffer.vaddr; u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); @@ -738,9 +755,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * it to userspace... */ reason = ((report32[0] >> OAREPORT_REASON_SHIFT) & - OAREPORT_REASON_MASK); + (IS_GEN(stream->perf->i915, 12) ? 
+ OAREPORT_REASON_MASK_EXTENDED : + OAREPORT_REASON_MASK)); if (reason == 0) { - if (__ratelimit(&dev_priv->perf.spurious_report_rs)) + if (__ratelimit(&stream->perf->spurious_report_rs)) DRM_NOTE("Skipping spurious, invalid OA report\n"); continue; } @@ -755,7 +774,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * Note: that we don't clear the valid_ctx_bit so userspace can * understand that the ID has been squashed by the kernel. */ - if (!(report32[0] & dev_priv->perf.gen8_valid_ctx_bit)) + if (!(report32[0] & stream->perf->gen8_valid_ctx_bit) && + INTEL_GEN(stream->perf->i915) <= 11) ctx_id = report32[2] = INVALID_CTX_ID; /* @@ -789,7 +809,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * switches since it's not-uncommon for periodic samples to * identify a switch before any 'context switch' report. */ - if (!dev_priv->perf.exclusive_stream->ctx || + if (!stream->perf->exclusive_stream->ctx || stream->specific_ctx_id == ctx_id || stream->oa_buffer.last_ctx_id == stream->specific_ctx_id || reason & OAREPORT_REASON_CTX_SWITCH) { @@ -798,7 +818,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * While filtering for a single context we avoid * leaking the IDs of other contexts. */ - if (dev_priv->perf.exclusive_stream->ctx && + if (stream->perf->exclusive_stream->ctx && stream->specific_ctx_id != ctx_id) { report32[2] = INVALID_CTX_ID; } @@ -822,6 +842,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, } if (start_offset != *offset) { + i915_reg_t oaheadptr; + + oaheadptr = IS_GEN(stream->perf->i915, 12) ? + GEN12_OAG_OAHEADPTR : GEN8_OAHEADPTR; + spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); /* @@ -829,8 +854,8 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream, * relative to oa_buf_base so put back here... */ head += gtt_offset; - - I915_WRITE(GEN8_OAHEADPTR, head & GEN8_OAHEADPTR_MASK); + intel_uncore_write(uncore, oaheadptr, + head & GEN12_OAG_OAHEADPTR_MASK); stream->oa_buffer.head = head; spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); @@ -864,14 +889,18 @@ static int gen8_oa_read(struct i915_perf_stream *stream, size_t count, size_t *offset) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = stream->uncore; u32 oastatus; + i915_reg_t oastatus_reg; int ret; if (WARN_ON(!stream->oa_buffer.vaddr)) return -EIO; - oastatus = I915_READ(GEN8_OASTATUS); + oastatus_reg = IS_GEN(stream->perf->i915, 12) ? 
+ GEN12_OAG_OASTATUS : GEN8_OASTATUS; + + oastatus = intel_uncore_read(uncore, oastatus_reg); /* * We treat OABUFFER_OVERFLOW as a significant error: @@ -896,14 +925,14 @@ static int gen8_oa_read(struct i915_perf_stream *stream, DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", stream->period_exponent); - dev_priv->perf.ops.oa_disable(stream); - dev_priv->perf.ops.oa_enable(stream); + stream->perf->ops.oa_disable(stream); + stream->perf->ops.oa_enable(stream); /* * Note: .oa_enable() is expected to re-init the oabuffer and * reset GEN8_OASTATUS for us */ - oastatus = I915_READ(GEN8_OASTATUS); + oastatus = intel_uncore_read(uncore, oastatus_reg); } if (oastatus & GEN8_OASTATUS_REPORT_LOST) { @@ -911,8 +940,8 @@ static int gen8_oa_read(struct i915_perf_stream *stream, DRM_I915_PERF_RECORD_OA_REPORT_LOST); if (ret) return ret; - I915_WRITE(GEN8_OASTATUS, - oastatus & ~GEN8_OASTATUS_REPORT_LOST); + intel_uncore_write(uncore, oastatus_reg, + oastatus & ~GEN8_OASTATUS_REPORT_LOST); } return gen8_append_oa_reports(stream, buf, count, offset); @@ -943,7 +972,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, size_t count, size_t *offset) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = stream->uncore; int report_size = stream->oa_buffer.format_size; u8 *oa_buf_base = stream->oa_buffer.vaddr; u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); @@ -1017,7 +1046,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, * copying it to userspace... */ if (report32[0] == 0) { - if (__ratelimit(&dev_priv->perf.spurious_report_rs)) + if (__ratelimit(&stream->perf->spurious_report_rs)) DRM_NOTE("Skipping spurious, invalid OA report\n"); continue; } @@ -1043,9 +1072,9 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream, */ head += gtt_offset; - I915_WRITE(GEN7_OASTATUS2, - ((head & GEN7_OASTATUS2_HEAD_MASK) | - GEN7_OASTATUS2_MEM_SELECT_GGTT)); + intel_uncore_write(uncore, GEN7_OASTATUS2, + (head & GEN7_OASTATUS2_HEAD_MASK) | + GEN7_OASTATUS2_MEM_SELECT_GGTT); stream->oa_buffer.head = head; spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); @@ -1075,21 +1104,21 @@ static int gen7_oa_read(struct i915_perf_stream *stream, size_t count, size_t *offset) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = stream->uncore; u32 oastatus1; int ret; if (WARN_ON(!stream->oa_buffer.vaddr)) return -EIO; - oastatus1 = I915_READ(GEN7_OASTATUS1); + oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1); /* XXX: On Haswell we don't have a safe way to clear oastatus1 * bits while the OA unit is enabled (while the tail pointer * may be updated asynchronously) so we ignore status bits * that have already been reported to userspace. 
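The gen7_latched_oastatus1 bookkeeping above is a small latched-status scheme: Haswell cannot safely clear OASTATUS1 bits while the OA unit is running, so bits that have already produced a record for userspace are remembered in software and masked out of later reads. A condensed sketch of the pattern, with illustrative names rather than the driver's:

	#include <linux/types.h>

	static u32 latched_status;

	static u32 fresh_status_bits(u32 hw_status)
	{
		u32 fresh = hw_status & ~latched_status;

		latched_status |= fresh;	/* report each sticky bit only once */
		return fresh;
	}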
*/ - oastatus1 &= ~dev_priv->perf.gen7_latched_oastatus1; + oastatus1 &= ~stream->perf->gen7_latched_oastatus1; /* We treat OABUFFER_OVERFLOW as a significant error: * @@ -1120,10 +1149,10 @@ static int gen7_oa_read(struct i915_perf_stream *stream, DRM_DEBUG("OA buffer overflow (exponent = %d): force restart\n", stream->period_exponent); - dev_priv->perf.ops.oa_disable(stream); - dev_priv->perf.ops.oa_enable(stream); + stream->perf->ops.oa_disable(stream); + stream->perf->ops.oa_enable(stream); - oastatus1 = I915_READ(GEN7_OASTATUS1); + oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1); } if (unlikely(oastatus1 & GEN7_OASTATUS1_REPORT_LOST)) { @@ -1131,7 +1160,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream, DRM_I915_PERF_RECORD_OA_REPORT_LOST); if (ret) return ret; - dev_priv->perf.gen7_latched_oastatus1 |= + stream->perf->gen7_latched_oastatus1 |= GEN7_OASTATUS1_REPORT_LOST; } @@ -1196,25 +1225,18 @@ static int i915_oa_read(struct i915_perf_stream *stream, size_t count, size_t *offset) { - struct drm_i915_private *dev_priv = stream->dev_priv; - - return dev_priv->perf.ops.read(stream, buf, count, offset); + return stream->perf->ops.read(stream, buf, count, offset); } static struct intel_context *oa_pin_context(struct i915_perf_stream *stream) { struct i915_gem_engines_iter it; - struct drm_i915_private *i915 = stream->dev_priv; struct i915_gem_context *ctx = stream->ctx; struct intel_context *ce; int err; - err = i915_mutex_lock_interruptible(&i915->drm); - if (err) - return ERR_PTR(err); - for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { - if (ce->engine->class != RENDER_CLASS) + if (ce->engine != stream->engine) /* first match! */ continue; /* @@ -1229,10 +1251,6 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream) } i915_gem_context_unlock_engines(ctx); - mutex_unlock(&i915->drm.struct_mutex); - if (err) - return ERR_PTR(err); - return stream->pinned_ctx; } @@ -1248,14 +1266,13 @@ static struct intel_context *oa_pin_context(struct i915_perf_stream *stream) */ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) { - struct drm_i915_private *i915 = stream->dev_priv; struct intel_context *ce; ce = oa_pin_context(stream); if (IS_ERR(ce)) return PTR_ERR(ce); - switch (INTEL_GEN(i915)) { + switch (INTEL_GEN(ce->engine->i915)) { case 7: { /* * On Haswell we don't do any post processing of the reports @@ -1269,7 +1286,11 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) case 8: case 9: case 10: - if (USES_GUC_SUBMISSION(i915)) { + if (intel_engine_in_execlists_submission_mode(ce->engine)) { + stream->specific_ctx_id_mask = + (1U << GEN8_CTX_ID_WIDTH) - 1; + stream->specific_ctx_id = stream->specific_ctx_id_mask; + } else { /* * When using GuC, the context descriptor we write in * i915 is read by GuC and rewritten before it's @@ -1289,31 +1310,23 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) */ stream->specific_ctx_id_mask = (1U << (GEN8_CTX_ID_WIDTH - 1)) - 1; - } else { - stream->specific_ctx_id_mask = - (1U << GEN8_CTX_ID_WIDTH) - 1; - stream->specific_ctx_id = - upper_32_bits(ce->lrc_desc); - stream->specific_ctx_id &= - stream->specific_ctx_id_mask; } break; - case 11: { + case 11: + case 12: { stream->specific_ctx_id_mask = - ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32) | - ((1U << GEN11_ENGINE_INSTANCE_WIDTH) - 1) << (GEN11_ENGINE_INSTANCE_SHIFT - 32) | - ((1 << GEN11_ENGINE_CLASS_WIDTH) - 1) << (GEN11_ENGINE_CLASS_SHIFT - 32); - stream->specific_ctx_id 
= upper_32_bits(ce->lrc_desc); - stream->specific_ctx_id &= - stream->specific_ctx_id_mask; + ((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32); + stream->specific_ctx_id = stream->specific_ctx_id_mask; break; } default: - MISSING_CASE(INTEL_GEN(i915)); + MISSING_CASE(INTEL_GEN(ce->engine->i915)); } + ce->tag = stream->specific_ctx_id_mask; + DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n", stream->specific_ctx_id, stream->specific_ctx_id_mask); @@ -1330,69 +1343,76 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream) */ static void oa_put_render_ctx_id(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; struct intel_context *ce; - stream->specific_ctx_id = INVALID_CTX_ID; - stream->specific_ctx_id_mask = 0; - ce = fetch_and_zero(&stream->pinned_ctx); if (ce) { - mutex_lock(&dev_priv->drm.struct_mutex); + ce->tag = 0; /* recomputed on next submission after parking */ intel_context_unpin(ce); - mutex_unlock(&dev_priv->drm.struct_mutex); } + + stream->specific_ctx_id = INVALID_CTX_ID; + stream->specific_ctx_id_mask = 0; } static void free_oa_buffer(struct i915_perf_stream *stream) { - struct drm_i915_private *i915 = stream->dev_priv; - - mutex_lock(&i915->drm.struct_mutex); - i915_vma_unpin_and_release(&stream->oa_buffer.vma, I915_VMA_RELEASE_MAP); - mutex_unlock(&i915->drm.struct_mutex); - stream->oa_buffer.vaddr = NULL; } +static void +free_oa_configs(struct i915_perf_stream *stream) +{ + struct i915_oa_config_bo *oa_bo, *tmp; + + i915_oa_config_put(stream->oa_config); + llist_for_each_entry_safe(oa_bo, tmp, stream->oa_config_bos.first, node) + free_oa_config_bo(oa_bo); +} + +static void +free_noa_wait(struct i915_perf_stream *stream) +{ + i915_vma_unpin_and_release(&stream->noa_wait, 0); +} + static void i915_oa_stream_destroy(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct i915_perf *perf = stream->perf; - BUG_ON(stream != dev_priv->perf.exclusive_stream); + BUG_ON(stream != perf->exclusive_stream); /* * Unset exclusive_stream first, it will be checked while disabling * the metric set on gen8+. 
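The oa_config lifetime handling above (i915_oa_config_release(), i915_perf_get_oa_config(), free_oa_configs()) follows the common kref-plus-RCU lookup pattern: the metrics IDR is consulted under rcu_read_lock(), a reference is taken only if the object is still live, and the final put frees through kfree_rcu() so concurrent lookups never dereference freed memory. A generic sketch of that pattern, with illustrative names:

	#include <linux/idr.h>
	#include <linux/kref.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct obj {
		struct kref ref;
		struct rcu_head rcu;
	};

	static struct obj *lookup_get(struct idr *idr, int id)
	{
		struct obj *o;

		rcu_read_lock();
		o = idr_find(idr, id);
		if (o && !kref_get_unless_zero(&o->ref))
			o = NULL;		/* raced with the final put */
		rcu_read_unlock();

		return o;
	}

	static void obj_release(struct kref *ref)
	{
		struct obj *o = container_of(ref, struct obj, ref);

		kfree_rcu(o, rcu);		/* defer the free past RCU readers */
	}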
*/ - mutex_lock(&dev_priv->drm.struct_mutex); - dev_priv->perf.exclusive_stream = NULL; - dev_priv->perf.ops.disable_metric_set(stream); - mutex_unlock(&dev_priv->drm.struct_mutex); + perf->exclusive_stream = NULL; + perf->ops.disable_metric_set(stream); free_oa_buffer(stream); - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref); + intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); + intel_engine_pm_put(stream->engine); if (stream->ctx) oa_put_render_ctx_id(stream); - put_oa_config(dev_priv, stream->oa_config); + free_oa_configs(stream); + free_noa_wait(stream); - if (dev_priv->perf.spurious_report_rs.missed) { + if (perf->spurious_report_rs.missed) { DRM_NOTE("%d spurious OA report notices suppressed due to ratelimiting\n", - dev_priv->perf.spurious_report_rs.missed); + perf->spurious_report_rs.missed); } } static void gen7_init_oa_buffer(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = stream->uncore; u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); unsigned long flags; @@ -1401,13 +1421,14 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream) /* Pre-DevBDW: OABUFFER must be set with counters off, * before OASTATUS1, but after OASTATUS2 */ - I915_WRITE(GEN7_OASTATUS2, - gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); /* head */ + intel_uncore_write(uncore, GEN7_OASTATUS2, /* head */ + gtt_offset | GEN7_OASTATUS2_MEM_SELECT_GGTT); stream->oa_buffer.head = gtt_offset; - I915_WRITE(GEN7_OABUFFER, gtt_offset); + intel_uncore_write(uncore, GEN7_OABUFFER, gtt_offset); - I915_WRITE(GEN7_OASTATUS1, gtt_offset | OABUFFER_SIZE_16M); /* tail */ + intel_uncore_write(uncore, GEN7_OASTATUS1, /* tail */ + gtt_offset | OABUFFER_SIZE_16M); /* Mark that we need updated tail pointers to read from... */ stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR; @@ -1419,7 +1440,7 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream) * already seen since they can't be cleared while periodic * sampling is enabled. */ - dev_priv->perf.gen7_latched_oastatus1 = 0; + stream->perf->gen7_latched_oastatus1 = 0; /* NB: although the OA buffer will initially be allocated * zeroed via shmfs (and so this memset is redundant when @@ -1434,25 +1455,22 @@ static void gen7_init_oa_buffer(struct i915_perf_stream *stream) */ memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); - /* Maybe make ->pollin per-stream state if we support multiple - * concurrent streams in the future. - */ stream->pollin = false; } static void gen8_init_oa_buffer(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = stream->uncore; u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); unsigned long flags; spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); - I915_WRITE(GEN8_OASTATUS, 0); - I915_WRITE(GEN8_OAHEADPTR, gtt_offset); + intel_uncore_write(uncore, GEN8_OASTATUS, 0); + intel_uncore_write(uncore, GEN8_OAHEADPTR, gtt_offset); stream->oa_buffer.head = gtt_offset; - I915_WRITE(GEN8_OABUFFER_UDW, 0); + intel_uncore_write(uncore, GEN8_OABUFFER_UDW, 0); /* * PRM says: @@ -1462,9 +1480,9 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream) * to enable proper functionality of the overflow * bit." 
*/ - I915_WRITE(GEN8_OABUFFER, gtt_offset | + intel_uncore_write(uncore, GEN8_OABUFFER, gtt_offset | OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); - I915_WRITE(GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK); + intel_uncore_write(uncore, GEN8_OATAILPTR, gtt_offset & GEN8_OATAILPTR_MASK); /* Mark that we need updated tail pointers to read from... */ stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR; @@ -1493,35 +1511,82 @@ static void gen8_init_oa_buffer(struct i915_perf_stream *stream) */ memset(stream->oa_buffer.vaddr, 0, OA_BUFFER_SIZE); + stream->pollin = false; +} + +static void gen12_init_oa_buffer(struct i915_perf_stream *stream) +{ + struct intel_uncore *uncore = stream->uncore; + u32 gtt_offset = i915_ggtt_offset(stream->oa_buffer.vma); + unsigned long flags; + + spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags); + + intel_uncore_write(uncore, GEN12_OAG_OASTATUS, 0); + intel_uncore_write(uncore, GEN12_OAG_OAHEADPTR, + gtt_offset & GEN12_OAG_OAHEADPTR_MASK); + stream->oa_buffer.head = gtt_offset; + + /* + * PRM says: + * + * "This MMIO must be set before the OATAILPTR + * register and after the OAHEADPTR register. This is + * to enable proper functionality of the overflow + * bit." + */ + intel_uncore_write(uncore, GEN12_OAG_OABUFFER, gtt_offset | + OABUFFER_SIZE_16M | GEN8_OABUFFER_MEM_SELECT_GGTT); + intel_uncore_write(uncore, GEN12_OAG_OATAILPTR, + gtt_offset & GEN12_OAG_OATAILPTR_MASK); + + /* Mark that we need updated tail pointers to read from... */ + stream->oa_buffer.tails[0].offset = INVALID_TAIL_PTR; + stream->oa_buffer.tails[1].offset = INVALID_TAIL_PTR; + + /* + * Reset state used to recognise context switches, affecting which + * reports we will forward to userspace while filtering for a single + * context. + */ + stream->oa_buffer.last_ctx_id = INVALID_CTX_ID; + + spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags); + /* - * Maybe make ->pollin per-stream state if we support multiple - * concurrent streams in the future. + * NB: although the OA buffer will initially be allocated + * zeroed via shmfs (and so this memset is redundant when + * first allocating), we may re-init the OA buffer, either + * when re-enabling a stream or in error/reset paths. + * + * The reason we clear the buffer for each re-init is for the + * sanity check in gen8_append_oa_reports() that looks at the + * reason field to make sure it's non-zero which relies on + * the assumption that new reports are being written to zeroed + * memory... 
*/ + memset(stream->oa_buffer.vaddr, 0, + stream->oa_buffer.vma->size); + stream->pollin = false; } static int alloc_oa_buffer(struct i915_perf_stream *stream) { struct drm_i915_gem_object *bo; - struct drm_i915_private *dev_priv = stream->dev_priv; struct i915_vma *vma; int ret; if (WARN_ON(stream->oa_buffer.vma)) return -ENODEV; - ret = i915_mutex_lock_interruptible(&dev_priv->drm); - if (ret) - return ret; - BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE); BUILD_BUG_ON(OA_BUFFER_SIZE < SZ_128K || OA_BUFFER_SIZE > SZ_16M); - bo = i915_gem_object_create_shmem(dev_priv, OA_BUFFER_SIZE); + bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE); if (IS_ERR(bo)) { DRM_ERROR("Failed to allocate OA buffer\n"); - ret = PTR_ERR(bo); - goto unlock; + return PTR_ERR(bo); } i915_gem_object_set_cache_coherency(bo, I915_CACHE_LLC); @@ -1541,11 +1606,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream) goto err_unpin; } - DRM_DEBUG_DRIVER("OA Buffer initialized, gtt offset = 0x%x, vaddr = %p\n", - i915_ggtt_offset(stream->oa_buffer.vma), - stream->oa_buffer.vaddr); - - goto unlock; + return 0; err_unpin: __i915_vma_unpin(vma); @@ -1556,55 +1617,389 @@ err_unref: stream->oa_buffer.vaddr = NULL; stream->oa_buffer.vma = NULL; -unlock: - mutex_unlock(&dev_priv->drm.struct_mutex); return ret; } -static void config_oa_regs(struct drm_i915_private *dev_priv, - const struct i915_oa_reg *regs, - u32 n_regs) +static u32 *save_restore_register(struct i915_perf_stream *stream, u32 *cs, + bool save, i915_reg_t reg, u32 offset, + u32 dword_count) +{ + u32 cmd; + u32 d; + + cmd = save ? MI_STORE_REGISTER_MEM : MI_LOAD_REGISTER_MEM; + if (INTEL_GEN(stream->perf->i915) >= 8) + cmd++; + + for (d = 0; d < dword_count; d++) { + *cs++ = cmd; + *cs++ = i915_mmio_reg_offset(reg) + 4 * d; + *cs++ = intel_gt_scratch_offset(stream->engine->gt, + offset) + 4 * d; + *cs++ = 0; + } + + return cs; +} + +static int alloc_noa_wait(struct i915_perf_stream *stream) +{ + struct drm_i915_private *i915 = stream->perf->i915; + struct drm_i915_gem_object *bo; + struct i915_vma *vma; + const u64 delay_ticks = 0xffffffffffffffff - + DIV64_U64_ROUND_UP( + atomic64_read(&stream->perf->noa_programming_delay) * + RUNTIME_INFO(i915)->cs_timestamp_frequency_khz, + 1000000ull); + const u32 base = stream->engine->mmio_base; +#define CS_GPR(x) GEN8_RING_CS_GPR(base, x) + u32 *batch, *ts0, *cs, *jump; + int ret, i; + enum { + START_TS, + NOW_TS, + DELTA_TS, + JUMP_PREDICATE, + DELTA_TARGET, + N_CS_GPR + }; + + bo = i915_gem_object_create_internal(i915, 4096); + if (IS_ERR(bo)) { + DRM_ERROR("Failed to allocate NOA wait batchbuffer\n"); + return PTR_ERR(bo); + } + + /* + * We pin in GGTT because we jump into this buffer now because + * multiple OA config BOs will have a jump to this address and it + * needs to be fixed during the lifetime of the i915/perf stream. + */ + vma = i915_gem_object_ggtt_pin(bo, NULL, 0, 0, PIN_HIGH); + if (IS_ERR(vma)) { + ret = PTR_ERR(vma); + goto err_unref; + } + + batch = cs = i915_gem_object_pin_map(bo, I915_MAP_WB); + if (IS_ERR(batch)) { + ret = PTR_ERR(batch); + goto err_unpin; + } + + /* Save registers. */ + for (i = 0; i < N_CS_GPR; i++) + cs = save_restore_register( + stream, cs, true /* save */, CS_GPR(i), + INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); + cs = save_restore_register( + stream, cs, true /* save */, MI_PREDICATE_RESULT_1, + INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); + + /* First timestamp snapshot location. 
*/ + ts0 = cs; + + /* + * Initial snapshot of the timestamp register to implement the wait. + * We work with 32b values, so clear out the top 32b bits of the + * register because the ALU works 64bits. + */ + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)) + 4; + *cs++ = 0; + *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); + *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base)); + *cs++ = i915_mmio_reg_offset(CS_GPR(START_TS)); + + /* + * This is the location we're going to jump back into until the + * required amount of time has passed. + */ + jump = cs; + + /* + * Take another snapshot of the timestamp register. Take care to clear + * up the top 32bits of CS_GPR(1) as we're using it for other + * operations below. + */ + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)) + 4; + *cs++ = 0; + *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); + *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(base)); + *cs++ = i915_mmio_reg_offset(CS_GPR(NOW_TS)); + + /* + * Do a diff between the 2 timestamps and store the result back into + * CS_GPR(1). + */ + *cs++ = MI_MATH(5); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(NOW_TS)); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(START_TS)); + *cs++ = MI_MATH_SUB; + *cs++ = MI_MATH_STORE(MI_MATH_REG(DELTA_TS), MI_MATH_REG_ACCU); + *cs++ = MI_MATH_STORE(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF); + + /* + * Transfer the carry flag (set to 1 if ts1 < ts0, meaning the + * timestamp have rolled over the 32bits) into the predicate register + * to be used for the predicated jump. + */ + *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); + *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE)); + *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1); + + /* Restart from the beginning if we had timestamps roll over. */ + *cs++ = (INTEL_GEN(i915) < 8 ? + MI_BATCH_BUFFER_START : + MI_BATCH_BUFFER_START_GEN8) | + MI_BATCH_PREDICATE; + *cs++ = i915_ggtt_offset(vma) + (ts0 - batch) * 4; + *cs++ = 0; + + /* + * Now add the diff between to previous timestamps and add it to : + * (((1 * << 64) - 1) - delay_ns) + * + * When the Carry Flag contains 1 this means the elapsed time is + * longer than the expected delay, and we can exit the wait loop. + */ + *cs++ = MI_LOAD_REGISTER_IMM(2); + *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)); + *cs++ = lower_32_bits(delay_ticks); + *cs++ = i915_mmio_reg_offset(CS_GPR(DELTA_TARGET)) + 4; + *cs++ = upper_32_bits(delay_ticks); + + *cs++ = MI_MATH(4); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCA, MI_MATH_REG(DELTA_TS)); + *cs++ = MI_MATH_LOAD(MI_MATH_REG_SRCB, MI_MATH_REG(DELTA_TARGET)); + *cs++ = MI_MATH_ADD; + *cs++ = MI_MATH_STOREINV(MI_MATH_REG(JUMP_PREDICATE), MI_MATH_REG_CF); + + /* + * Transfer the result into the predicate register to be used for the + * predicated jump. + */ + *cs++ = MI_LOAD_REGISTER_REG | (3 - 2); + *cs++ = i915_mmio_reg_offset(CS_GPR(JUMP_PREDICATE)); + *cs++ = i915_mmio_reg_offset(MI_PREDICATE_RESULT_1); + + /* Predicate the jump. */ + *cs++ = (INTEL_GEN(i915) < 8 ? + MI_BATCH_BUFFER_START : + MI_BATCH_BUFFER_START_GEN8) | + MI_BATCH_PREDICATE; + *cs++ = i915_ggtt_offset(vma) + (jump - batch) * 4; + *cs++ = 0; + + /* Restore registers. */ + for (i = 0; i < N_CS_GPR; i++) + cs = save_restore_register( + stream, cs, false /* restore */, CS_GPR(i), + INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR + 8 * i, 2); + cs = save_restore_register( + stream, cs, false /* restore */, MI_PREDICATE_RESULT_1, + INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1, 1); + + /* And return to the ring. 
*/ + *cs++ = MI_BATCH_BUFFER_END; + + GEM_BUG_ON(cs - batch > PAGE_SIZE / sizeof(*batch)); + + i915_gem_object_flush_map(bo); + i915_gem_object_unpin_map(bo); + + stream->noa_wait = vma; + return 0; + +err_unpin: + i915_vma_unpin_and_release(&vma, 0); +err_unref: + i915_gem_object_put(bo); + return ret; +} + +static u32 *write_cs_mi_lri(u32 *cs, + const struct i915_oa_reg *reg_data, + u32 n_regs) { u32 i; for (i = 0; i < n_regs; i++) { - const struct i915_oa_reg *reg = regs + i; + if ((i % MI_LOAD_REGISTER_IMM_MAX_REGS) == 0) { + u32 n_lri = min_t(u32, + n_regs - i, + MI_LOAD_REGISTER_IMM_MAX_REGS); - I915_WRITE(reg->addr, reg->value); + *cs++ = MI_LOAD_REGISTER_IMM(n_lri); + } + *cs++ = i915_mmio_reg_offset(reg_data[i].addr); + *cs++ = reg_data[i].value; } + + return cs; } -static void delay_after_mux(void) +static int num_lri_dwords(int num_regs) { + int count = 0; + + if (num_regs > 0) { + count += DIV_ROUND_UP(num_regs, MI_LOAD_REGISTER_IMM_MAX_REGS); + count += num_regs * 2; + } + + return count; +} + +static struct i915_oa_config_bo * +alloc_oa_config_buffer(struct i915_perf_stream *stream, + struct i915_oa_config *oa_config) +{ + struct drm_i915_gem_object *obj; + struct i915_oa_config_bo *oa_bo; + size_t config_length = 0; + u32 *cs; + int err; + + oa_bo = kzalloc(sizeof(*oa_bo), GFP_KERNEL); + if (!oa_bo) + return ERR_PTR(-ENOMEM); + + config_length += num_lri_dwords(oa_config->mux_regs_len); + config_length += num_lri_dwords(oa_config->b_counter_regs_len); + config_length += num_lri_dwords(oa_config->flex_regs_len); + config_length += 3; /* MI_BATCH_BUFFER_START */ + config_length = ALIGN(sizeof(u32) * config_length, I915_GTT_PAGE_SIZE); + + obj = i915_gem_object_create_shmem(stream->perf->i915, config_length); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_free; + } + + cs = i915_gem_object_pin_map(obj, I915_MAP_WB); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto err_oa_bo; + } + + cs = write_cs_mi_lri(cs, + oa_config->mux_regs, + oa_config->mux_regs_len); + cs = write_cs_mi_lri(cs, + oa_config->b_counter_regs, + oa_config->b_counter_regs_len); + cs = write_cs_mi_lri(cs, + oa_config->flex_regs, + oa_config->flex_regs_len); + + /* Jump into the active wait. */ + *cs++ = (INTEL_GEN(stream->perf->i915) < 8 ? + MI_BATCH_BUFFER_START : + MI_BATCH_BUFFER_START_GEN8); + *cs++ = i915_ggtt_offset(stream->noa_wait); + *cs++ = 0; + + i915_gem_object_flush_map(obj); + i915_gem_object_unpin_map(obj); + + oa_bo->vma = i915_vma_instance(obj, + &stream->engine->gt->ggtt->vm, + NULL); + if (IS_ERR(oa_bo->vma)) { + err = PTR_ERR(oa_bo->vma); + goto err_oa_bo; + } + + oa_bo->oa_config = i915_oa_config_get(oa_config); + llist_add(&oa_bo->node, &stream->oa_config_bos); + + return oa_bo; + +err_oa_bo: + i915_gem_object_put(obj); +err_free: + kfree(oa_bo); + return ERR_PTR(err); +} + +static struct i915_vma * +get_oa_vma(struct i915_perf_stream *stream, struct i915_oa_config *oa_config) +{ + struct i915_oa_config_bo *oa_bo; + /* - * It apparently takes a fairly long time for a new MUX - * configuration to be be applied after these register writes. - * This delay duration was derived empirically based on the - * render_basic config but hopefully it covers the maximum - * configuration latency. - * - * As a fallback, the checks in _append_oa_reports() to skip - * invalid OA reports do also seem to work to discard reports - * generated before this config has completed - albeit not - * silently. 
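The NOA wait batch assembled above busy-waits on the GPU by repeatedly snapshotting RING_TIMESTAMP and feeding MI_MATH's carry flag into the predicated jump; delay_ticks is pre-computed as U64_MAX minus the target tick count, so the final add overflows exactly when enough time has passed. A CPU-side sketch of the same arithmetic (illustrative only, not driver code):

	#include <linux/types.h>

	static bool noa_wait_done(u32 start_ts, u32 now_ts, u64 delay_ticks)
	{
		/* delay_ticks is programmed as U64_MAX - target_ticks */
		u64 delta = now_ts - start_ts;	/* the batch restarts on 32-bit rollover instead */
		u64 sum = delta + delay_ticks;

		return sum < delta;		/* u64 overflow == MI_MATH carry == done */
	}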
- * - * Unfortunately this is essentially a magic number, since we - * don't currently know of a reliable mechanism for predicting - * how long the MUX config will take to apply and besides - * seeing invalid reports we don't know of a reliable way to - * explicitly check that the MUX config has landed. - * - * It's even possible we've miss characterized the underlying - * problem - it just seems like the simplest explanation why - * a delay at this location would mitigate any invalid reports. + * Look for the buffer in the already allocated BOs attached + * to the stream. */ - usleep_range(15000, 20000); + llist_for_each_entry(oa_bo, stream->oa_config_bos.first, node) { + if (oa_bo->oa_config == oa_config && + memcmp(oa_bo->oa_config->uuid, + oa_config->uuid, + sizeof(oa_config->uuid)) == 0) + goto out; + } + + oa_bo = alloc_oa_config_buffer(stream, oa_config); + if (IS_ERR(oa_bo)) + return ERR_CAST(oa_bo); + +out: + return i915_vma_get(oa_bo->vma); +} + +static int emit_oa_config(struct i915_perf_stream *stream, + struct i915_oa_config *oa_config, + struct intel_context *ce) +{ + struct i915_request *rq; + struct i915_vma *vma; + int err; + + vma = get_oa_vma(stream, oa_config); + if (IS_ERR(vma)) + return PTR_ERR(vma); + + err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH); + if (err) + goto err_vma_put; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto err_vma_unpin; + } + + i915_vma_lock(vma); + err = i915_request_await_object(rq, vma->obj, 0); + if (!err) + err = i915_vma_move_to_active(vma, rq, 0); + i915_vma_unlock(vma); + if (err) + goto err_add_request; + + err = rq->engine->emit_bb_start(rq, + vma->node.start, 0, + I915_DISPATCH_SECURE); +err_add_request: + i915_request_add(rq); +err_vma_unpin: + i915_vma_unpin(vma); +err_vma_put: + i915_vma_put(vma); + return err; +} + +static struct intel_context *oa_context(struct i915_perf_stream *stream) +{ + return stream->pinned_ctx ?: stream->engine->kernel_context; } static int hsw_enable_metric_set(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; - const struct i915_oa_config *oa_config = stream->oa_config; + struct intel_uncore *uncore = stream->uncore; /* * PRM: @@ -1616,31 +2011,24 @@ static int hsw_enable_metric_set(struct i915_perf_stream *stream) * count the events from non-render domain. Unit level clock * gating for RCS should also be disabled. 
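In hsw_enable_metric_set() and hsw_disable_metric_set() here, the open-coded I915_READ/I915_WRITE pairs become intel_uncore_rmw(uncore, reg, clear, set) calls. Assuming the usual clear-then-set semantics of that helper, the conversion preserves the programmed values; a sketch of what the helper is expected to do, written against the driver's intel_uncore_read()/intel_uncore_write() accessors:

	static void rmw_sketch(struct intel_uncore *uncore, i915_reg_t reg,
			       u32 clear, u32 set)
	{
		u32 old = intel_uncore_read(uncore, reg);
		u32 val = (old & ~clear) | set;

		if (val != old)
			intel_uncore_write(uncore, reg, val);
	}

So intel_uncore_rmw(uncore, GEN7_MISCCPCTL, GEN7_DOP_CLOCK_GATE_ENABLE, 0) clears the DOP clock gating enable bit, matching the removed I915_WRITE of GEN7_MISCCPCTL with that bit masked off.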
*/ - I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) & - ~GEN7_DOP_CLOCK_GATE_ENABLE)); - I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) | - GEN6_CSUNIT_CLOCK_GATE_DISABLE)); - - config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); - delay_after_mux(); - - config_oa_regs(dev_priv, oa_config->b_counter_regs, - oa_config->b_counter_regs_len); + intel_uncore_rmw(uncore, GEN7_MISCCPCTL, + GEN7_DOP_CLOCK_GATE_ENABLE, 0); + intel_uncore_rmw(uncore, GEN6_UCGCTL1, + 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE); - return 0; + return emit_oa_config(stream, stream->oa_config, oa_context(stream)); } static void hsw_disable_metric_set(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = stream->uncore; - I915_WRITE(GEN6_UCGCTL1, (I915_READ(GEN6_UCGCTL1) & - ~GEN6_CSUNIT_CLOCK_GATE_DISABLE)); - I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) | - GEN7_DOP_CLOCK_GATE_ENABLE)); + intel_uncore_rmw(uncore, GEN6_UCGCTL1, + GEN6_CSUNIT_CLOCK_GATE_DISABLE, 0); + intel_uncore_rmw(uncore, GEN7_MISCCPCTL, + 0, GEN7_DOP_CLOCK_GATE_ENABLE); - I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & - ~GT_NOA_ENABLE)); + intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); } static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config, @@ -1672,14 +2060,11 @@ static u32 oa_config_flex_reg(const struct i915_oa_config *oa_config, * in the case that the OA unit has been disabled. */ static void -gen8_update_reg_state_unlocked(struct i915_perf_stream *stream, - struct intel_context *ce, - u32 *reg_state, - const struct i915_oa_config *oa_config) -{ - struct drm_i915_private *i915 = ce->engine->i915; - u32 ctx_oactxctrl = i915->perf.ctx_oactxctrl_offset; - u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset; +gen8_update_reg_state_unlocked(const struct intel_context *ce, + const struct i915_perf_stream *stream) +{ + u32 ctx_oactxctrl = stream->perf->ctx_oactxctrl_offset; + u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; /* The MMIO offsets for Flex EU registers aren't contiguous */ i915_reg_t flex_regs[] = { EU_PERF_CNTL0, @@ -1690,21 +2075,28 @@ gen8_update_reg_state_unlocked(struct i915_perf_stream *stream, EU_PERF_CNTL5, EU_PERF_CNTL6, }; + u32 *reg_state = ce->lrc_reg_state; int i; - CTX_REG(reg_state, ctx_oactxctrl, GEN8_OACTXCONTROL, - (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | - (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | - GEN8_OA_COUNTER_RESUME); + if (IS_GEN(stream->perf->i915, 12)) { + u32 format = stream->oa_buffer.format; - for (i = 0; i < ARRAY_SIZE(flex_regs); i++) { - CTX_REG(reg_state, ctx_flexeu0 + i * 2, flex_regs[i], - oa_config_flex_reg(oa_config, flex_regs[i])); + reg_state[ctx_oactxctrl + 1] = + (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) | + (stream->oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0); + } else { + reg_state[ctx_oactxctrl + 1] = + (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | + (stream->periodic ? 
GEN8_OA_TIMER_ENABLE : 0) | + GEN8_OA_COUNTER_RESUME; } - CTX_REG(reg_state, - CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE, - intel_sseu_make_rpcs(i915, &ce->sseu)); + for (i = 0; !!ctx_flexeu0 && i < ARRAY_SIZE(flex_regs); i++) + reg_state[ctx_flexeu0 + i * 2 + 1] = + oa_config_flex_reg(stream->oa_config, flex_regs[i]); + + reg_state[CTX_R_PWR_CLK_STATE] = + intel_sseu_make_rpcs(ce->engine->i915, &ce->sseu); } struct flex { @@ -1728,7 +2120,7 @@ gen8_store_flex(struct i915_request *rq, offset = i915_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE; do { *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT; - *cs++ = offset + (flex->offset + 1) * sizeof(u32); + *cs++ = offset + flex->offset * sizeof(u32); *cs++ = 0; *cs++ = flex->value; } while (flex++, --count); @@ -1832,6 +2224,36 @@ static int gen8_configure_context(struct i915_gem_context *ctx, return err; } +static int gen12_emit_oar_config(struct intel_context *ce, bool enable) +{ + struct i915_request *rq; + u32 *cs; + int err = 0; + + rq = i915_request_create(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + cs = intel_ring_begin(rq, 4); + if (IS_ERR(cs)) { + err = PTR_ERR(cs); + goto out; + } + + *cs++ = MI_LOAD_REGISTER_IMM(1); + *cs++ = i915_mmio_reg_offset(RING_CONTEXT_CONTROL(ce->engine->mmio_base)); + *cs++ = _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE, + enable ? GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE : 0); + *cs++ = MI_NOOP; + + intel_ring_advance(rq, cs); + +out: + i915_request_add(rq); + + return err; +} + /* * Manages updating the per-context aspects of the OA stream * configuration across all contexts. @@ -1856,24 +2278,22 @@ static int gen8_configure_context(struct i915_gem_context *ctx, * * Note: it's only the RCS/Render context that has any OA state. */ -static int gen8_configure_all_contexts(struct i915_perf_stream *stream, - const struct i915_oa_config *oa_config) +static int lrc_configure_all_contexts(struct i915_perf_stream *stream, + const struct i915_oa_config *oa_config) { - struct drm_i915_private *i915 = stream->dev_priv; + struct drm_i915_private *i915 = stream->perf->i915; /* The MMIO offsets for Flex EU registers aren't contiguous */ - const u32 ctx_flexeu0 = i915->perf.ctx_flexeu0_offset; -#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N)) + const u32 ctx_flexeu0 = stream->perf->ctx_flexeu0_offset; +#define ctx_flexeuN(N) (ctx_flexeu0 + 2 * (N) + 1) struct flex regs[] = { { GEN8_R_PWR_CLK_STATE, CTX_R_PWR_CLK_STATE, }, { - GEN8_OACTXCONTROL, - i915->perf.ctx_oactxctrl_offset, - ((stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | - (stream->periodic ? GEN8_OA_TIMER_ENABLE : 0) | - GEN8_OA_COUNTER_RESUME) + IS_GEN(i915, 12) ? + GEN12_OAR_OACONTROL : GEN8_OACTXCONTROL, + stream->perf->ctx_oactxctrl_offset + 1, }, { EU_PERF_CNTL0, ctx_flexeuN(0) }, { EU_PERF_CNTL1, ctx_flexeuN(1) }, @@ -1885,13 +2305,27 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream, }; #undef ctx_flexeuN struct intel_engine_cs *engine; - struct i915_gem_context *ctx; - int i; + struct i915_gem_context *ctx, *cn; + size_t array_size = IS_GEN(i915, 12) ? 2 : ARRAY_SIZE(regs); + int i, err; - for (i = 2; i < ARRAY_SIZE(regs); i++) + if (IS_GEN(i915, 12)) { + u32 format = stream->oa_buffer.format; + + regs[1].value = + (format << GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT) | + (oa_config ? GEN12_OAR_OACONTROL_COUNTER_ENABLE : 0); + } else { + regs[1].value = + (stream->period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) | + (stream->periodic ? 
GEN8_OA_TIMER_ENABLE : 0) | + GEN8_OA_COUNTER_RESUME; + } + + for (i = 2; !!ctx_flexeu0 && i < array_size; i++) regs[i].value = oa_config_flex_reg(oa_config, regs[i].reg); - lockdep_assert_held(&i915->drm.struct_mutex); + lockdep_assert_held(&stream->perf->lock); /* * The OA register config is setup through the context image. This image @@ -1909,16 +2343,27 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream, * context. Contexts idle at the time of reconfiguration are not * trapped behind the barrier. */ - list_for_each_entry(ctx, &i915->contexts.list, link) { - int err; - + spin_lock(&i915->gem.contexts.lock); + list_for_each_entry_safe(ctx, cn, &i915->gem.contexts.list, link) { if (ctx == i915->kernel_context) continue; - err = gen8_configure_context(ctx, regs, ARRAY_SIZE(regs)); - if (err) + if (!kref_get_unless_zero(&ctx->ref)) + continue; + + spin_unlock(&i915->gem.contexts.lock); + + err = gen8_configure_context(ctx, regs, array_size); + if (err) { + i915_gem_context_put(ctx); return err; + } + + spin_lock(&i915->gem.contexts.lock); + list_safe_reset_next(ctx, cn, link); + i915_gem_context_put(ctx); } + spin_unlock(&i915->gem.contexts.lock); /* * After updating all other contexts, we need to modify ourselves. @@ -1927,14 +2372,13 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream, */ for_each_uabi_engine(engine, i915) { struct intel_context *ce = engine->kernel_context; - int err; if (engine->class != RENDER_CLASS) continue; regs[0].value = intel_sseu_make_rpcs(i915, &ce->sseu); - err = gen8_modify_self(ce, regs, ARRAY_SIZE(regs)); + err = gen8_modify_self(ce, regs, array_size); if (err) return err; } @@ -1944,8 +2388,8 @@ static int gen8_configure_all_contexts(struct i915_perf_stream *stream, static int gen8_enable_metric_set(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; - const struct i915_oa_config *oa_config = stream->oa_config; + struct intel_uncore *uncore = stream->uncore; + struct i915_oa_config *oa_config = stream->oa_config; int ret; /* @@ -1971,10 +2415,10 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream) * be read back from automatically triggered reports, as part of the * RPT_ID field. */ - if (IS_GEN_RANGE(dev_priv, 9, 11)) { - I915_WRITE(GEN8_OA_DEBUG, - _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | - GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); + if (IS_GEN_RANGE(stream->perf->i915, 9, 11)) { + intel_uncore_write(uncore, GEN8_OA_DEBUG, + _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | + GEN9_OA_DEBUG_INCLUDE_CLK_RATIO)); } /* @@ -1982,45 +2426,102 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream) * to make sure all slices/subslices are ON before writing to NOA * registers. */ - ret = gen8_configure_all_contexts(stream, oa_config); + ret = lrc_configure_all_contexts(stream, oa_config); if (ret) return ret; - config_oa_regs(dev_priv, oa_config->mux_regs, oa_config->mux_regs_len); - delay_after_mux(); + return emit_oa_config(stream, oa_config, oa_context(stream)); +} + +static int gen12_enable_metric_set(struct i915_perf_stream *stream) +{ + struct intel_uncore *uncore = stream->uncore; + struct i915_oa_config *oa_config = stream->oa_config; + bool periodic = stream->periodic; + u32 period_exponent = stream->period_exponent; + int ret; - config_oa_regs(dev_priv, oa_config->b_counter_regs, - oa_config->b_counter_regs_len); + intel_uncore_write(uncore, GEN12_OAG_OA_DEBUG, + /* Disable clk ratio reports, like previous Gens. 
*/ + _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS | + GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) | + /* + * If the user didn't require OA reports, instruct the + * hardware not to emit ctx switch reports. + */ + !(stream->sample_flags & SAMPLE_OA_REPORT) ? + _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS) : + _MASKED_BIT_DISABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS)); + + intel_uncore_write(uncore, GEN12_OAG_OAGLBCTXCTRL, periodic ? + (GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME | + GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE | + (period_exponent << GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT)) + : 0); - return 0; + /* + * Update all contexts prior writing the mux configurations as we need + * to make sure all slices/subslices are ON before writing to NOA + * registers. + */ + ret = lrc_configure_all_contexts(stream, oa_config); + if (ret) + return ret; + + /* + * For Gen12, performance counters are context + * saved/restored. Only enable it for the context that + * requested this. + */ + if (stream->ctx) { + ret = gen12_emit_oar_config(stream->pinned_ctx, + oa_config != NULL); + if (ret) + return ret; + } + + return emit_oa_config(stream, oa_config, oa_context(stream)); } static void gen8_disable_metric_set(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = stream->uncore; /* Reset all contexts' slices/subslices configurations. */ - gen8_configure_all_contexts(stream, NULL); + lrc_configure_all_contexts(stream, NULL); - I915_WRITE(GDT_CHICKEN_BITS, (I915_READ(GDT_CHICKEN_BITS) & - ~GT_NOA_ENABLE)); + intel_uncore_rmw(uncore, GDT_CHICKEN_BITS, GT_NOA_ENABLE, 0); } static void gen10_disable_metric_set(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = stream->uncore; + + /* Reset all contexts' slices/subslices configurations. */ + lrc_configure_all_contexts(stream, NULL); + + /* Make sure we disable noa to save power. */ + intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); +} + +static void gen12_disable_metric_set(struct i915_perf_stream *stream) +{ + struct intel_uncore *uncore = stream->uncore; /* Reset all contexts' slices/subslices configurations. */ - gen8_configure_all_contexts(stream, NULL); + lrc_configure_all_contexts(stream, NULL); + + /* disable the context save/restore or OAR counters */ + if (stream->ctx) + gen12_emit_oar_config(stream->pinned_ctx, false); /* Make sure we disable noa to save power. */ - I915_WRITE(RPM_CONFIG1, - I915_READ(RPM_CONFIG1) & ~GEN10_GT_NOA_ENABLE); + intel_uncore_rmw(uncore, RPM_CONFIG1, GEN10_GT_NOA_ENABLE, 0); } static void gen7_oa_enable(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = stream->uncore; struct i915_gem_context *ctx = stream->ctx; u32 ctx_id = stream->specific_ctx_id; bool periodic = stream->periodic; @@ -2038,19 +2539,19 @@ static void gen7_oa_enable(struct i915_perf_stream *stream) */ gen7_init_oa_buffer(stream); - I915_WRITE(GEN7_OACONTROL, - (ctx_id & GEN7_OACONTROL_CTX_MASK) | - (period_exponent << - GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | - (periodic ? GEN7_OACONTROL_TIMER_ENABLE : 0) | - (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | - (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) | - GEN7_OACONTROL_ENABLE); + intel_uncore_write(uncore, GEN7_OACONTROL, + (ctx_id & GEN7_OACONTROL_CTX_MASK) | + (period_exponent << + GEN7_OACONTROL_TIMER_PERIOD_SHIFT) | + (periodic ? 
GEN7_OACONTROL_TIMER_ENABLE : 0) | + (report_format << GEN7_OACONTROL_FORMAT_SHIFT) | + (ctx ? GEN7_OACONTROL_PER_CTX_ENABLE : 0) | + GEN7_OACONTROL_ENABLE); } static void gen8_oa_enable(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct intel_uncore *uncore = stream->uncore; u32 report_format = stream->oa_buffer.format; /* @@ -2069,9 +2570,28 @@ static void gen8_oa_enable(struct i915_perf_stream *stream) * filtering and instead filter on the cpu based on the context-id * field of reports */ - I915_WRITE(GEN8_OACONTROL, (report_format << - GEN8_OA_REPORT_FORMAT_SHIFT) | - GEN8_OA_COUNTER_ENABLE); + intel_uncore_write(uncore, GEN8_OACONTROL, + (report_format << GEN8_OA_REPORT_FORMAT_SHIFT) | + GEN8_OA_COUNTER_ENABLE); +} + +static void gen12_oa_enable(struct i915_perf_stream *stream) +{ + struct intel_uncore *uncore = stream->uncore; + u32 report_format = stream->oa_buffer.format; + + /* + * If we don't want OA reports from the OA buffer, then we don't even + * need to program the OAG unit. + */ + if (!(stream->sample_flags & SAMPLE_OA_REPORT)) + return; + + gen12_init_oa_buffer(stream); + + intel_uncore_write(uncore, GEN12_OAG_OACONTROL, + (report_format << GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT) | + GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE); } /** @@ -2085,9 +2605,7 @@ static void gen8_oa_enable(struct i915_perf_stream *stream) */ static void i915_oa_stream_enable(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; - - dev_priv->perf.ops.oa_enable(stream); + stream->perf->ops.oa_enable(stream); if (stream->periodic) hrtimer_start(&stream->poll_check_timer, @@ -2097,7 +2615,7 @@ static void i915_oa_stream_enable(struct i915_perf_stream *stream) static void gen7_oa_disable(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = &stream->dev_priv->uncore; + struct intel_uncore *uncore = stream->uncore; intel_uncore_write(uncore, GEN7_OACONTROL, 0); if (intel_wait_for_register(uncore, @@ -2108,7 +2626,7 @@ static void gen7_oa_disable(struct i915_perf_stream *stream) static void gen8_oa_disable(struct i915_perf_stream *stream) { - struct intel_uncore *uncore = &stream->dev_priv->uncore; + struct intel_uncore *uncore = stream->uncore; intel_uncore_write(uncore, GEN8_OACONTROL, 0); if (intel_wait_for_register(uncore, @@ -2117,6 +2635,18 @@ static void gen8_oa_disable(struct i915_perf_stream *stream) DRM_ERROR("wait for OA to be disabled timed out\n"); } +static void gen12_oa_disable(struct i915_perf_stream *stream) +{ + struct intel_uncore *uncore = stream->uncore; + + intel_uncore_write(uncore, GEN12_OAG_OACONTROL, 0); + if (intel_wait_for_register(uncore, + GEN12_OAG_OACONTROL, + GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0, + 50)) + DRM_ERROR("wait for OA to be disabled timed out\n"); +} + /** * i915_oa_stream_disable - handle `I915_PERF_IOCTL_DISABLE` for OA stream * @stream: An i915 perf stream opened for OA metrics @@ -2127,9 +2657,7 @@ static void gen8_oa_disable(struct i915_perf_stream *stream) */ static void i915_oa_stream_disable(struct i915_perf_stream *stream) { - struct drm_i915_private *dev_priv = stream->dev_priv; - - dev_priv->perf.ops.oa_disable(stream); + stream->perf->ops.oa_disable(stream); if (stream->periodic) hrtimer_cancel(&stream->poll_check_timer); @@ -2166,15 +2694,21 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, struct drm_i915_perf_open_param *param, struct perf_open_properties *props) { - struct drm_i915_private *dev_priv = stream->dev_priv; + struct 
i915_perf *perf = stream->perf; int format_size; int ret; - /* If the sysfs metrics/ directory wasn't registered for some + if (!props->engine) { + DRM_DEBUG("OA engine not specified\n"); + return -EINVAL; + } + + /* + * If the sysfs metrics/ directory wasn't registered for some * reason then don't let userspace try their luck with config * IDs */ - if (!dev_priv->perf.metrics_kobj) { + if (!perf->metrics_kobj) { DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); return -EINVAL; } @@ -2184,16 +2718,17 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return -EINVAL; } - if (!dev_priv->perf.ops.enable_metric_set) { + if (!perf->ops.enable_metric_set) { DRM_DEBUG("OA unit not supported\n"); return -ENODEV; } - /* To avoid the complexity of having to accurately filter + /* + * To avoid the complexity of having to accurately filter * counter reports and marshal to the appropriate client * we currently only allow exclusive access */ - if (dev_priv->perf.exclusive_stream) { + if (perf->exclusive_stream) { DRM_DEBUG("OA unit already in use\n"); return -EBUSY; } @@ -2203,9 +2738,12 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return -EINVAL; } + stream->engine = props->engine; + stream->uncore = stream->engine->gt->uncore; + stream->sample_size = sizeof(struct drm_i915_perf_record_header); - format_size = dev_priv->perf.oa_formats[props->oa_format].size; + format_size = perf->oa_formats[props->oa_format].size; stream->sample_flags |= SAMPLE_OA_REPORT; stream->sample_size += format_size; @@ -2214,8 +2752,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, if (WARN_ON(stream->oa_buffer.format_size == 0)) return -EINVAL; + stream->hold_preemption = props->hold_preemption; + stream->oa_buffer.format = - dev_priv->perf.oa_formats[props->oa_format].format; + perf->oa_formats[props->oa_format].format; stream->periodic = props->oa_periodic; if (stream->periodic) @@ -2229,9 +2769,16 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, } } - ret = get_oa_config(dev_priv, props->metrics_set, &stream->oa_config); + ret = alloc_noa_wait(stream); if (ret) { + DRM_DEBUG("Unable to allocate NOA wait batch buffer\n"); + goto err_noa_wait_alloc; + } + + stream->oa_config = i915_perf_get_oa_config(perf, props->metrics_set); + if (!stream->oa_config) { DRM_DEBUG("Invalid OA config id=%i\n", props->metrics_set); + ret = -EINVAL; goto err_config; } @@ -2247,27 +2794,24 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, * In our case we are expecting that taking pm + FORCEWAKE * references will effectively disable RC6. 
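 * (Hence the intel_engine_pm_get() and FORCEWAKE_ALL references taken just
 * below; both are released again on the error unwind paths and when the
 * stream is torn down.)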
*/ - stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); + intel_engine_pm_get(stream->engine); + intel_uncore_forcewake_get(stream->uncore, FORCEWAKE_ALL); ret = alloc_oa_buffer(stream); if (ret) goto err_oa_buf_alloc; - ret = i915_mutex_lock_interruptible(&dev_priv->drm); - if (ret) - goto err_lock; - stream->ops = &i915_oa_stream_ops; - dev_priv->perf.exclusive_stream = stream; + perf->exclusive_stream = stream; - ret = dev_priv->perf.ops.enable_metric_set(stream); + ret = perf->ops.enable_metric_set(stream); if (ret) { DRM_DEBUG("Unable to enable metric set\n"); goto err_enable; } - mutex_unlock(&dev_priv->drm.struct_mutex); + DRM_DEBUG("opening stream oa config uuid=%s\n", + stream->oa_config->uuid); hrtimer_init(&stream->poll_check_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); @@ -2278,38 +2822,40 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return 0; err_enable: - dev_priv->perf.exclusive_stream = NULL; - dev_priv->perf.ops.disable_metric_set(stream); - mutex_unlock(&dev_priv->drm.struct_mutex); + perf->exclusive_stream = NULL; + perf->ops.disable_metric_set(stream); -err_lock: free_oa_buffer(stream); err_oa_buf_alloc: - put_oa_config(dev_priv, stream->oa_config); + free_oa_configs(stream); - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref); + intel_uncore_forcewake_put(stream->uncore, FORCEWAKE_ALL); + intel_engine_pm_put(stream->engine); err_config: + free_noa_wait(stream); + +err_noa_wait_alloc: if (stream->ctx) oa_put_render_ctx_id(stream); return ret; } -void i915_oa_init_reg_state(struct intel_engine_cs *engine, - struct intel_context *ce, - u32 *regs) +void i915_oa_init_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine) { struct i915_perf_stream *stream; + /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */ + if (engine->class != RENDER_CLASS) return; stream = engine->i915->perf.exclusive_stream; if (stream) - gen8_update_reg_state_unlocked(stream, ce, regs, stream->oa_config); + gen8_update_reg_state_unlocked(ce, stream); } /** @@ -2379,7 +2925,7 @@ static ssize_t i915_perf_read(struct file *file, loff_t *ppos) { struct i915_perf_stream *stream = file->private_data; - struct drm_i915_private *dev_priv = stream->dev_priv; + struct i915_perf *perf = stream->perf; ssize_t ret; /* To ensure it's handled consistently we simply treat all reads of a @@ -2402,15 +2948,15 @@ static ssize_t i915_perf_read(struct file *file, if (ret) return ret; - mutex_lock(&dev_priv->perf.lock); + mutex_lock(&perf->lock); ret = i915_perf_read_locked(stream, file, buf, count, ppos); - mutex_unlock(&dev_priv->perf.lock); + mutex_unlock(&perf->lock); } while (ret == -EAGAIN); } else { - mutex_lock(&dev_priv->perf.lock); + mutex_lock(&perf->lock); ret = i915_perf_read_locked(stream, file, buf, count, ppos); - mutex_unlock(&dev_priv->perf.lock); + mutex_unlock(&perf->lock); } /* We allow the poll checking to sometimes report false positive EPOLLIN @@ -2448,7 +2994,6 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) /** * i915_perf_poll_locked - poll_wait() with a suitable wait queue for stream - * @dev_priv: i915 device instance * @stream: An i915 perf stream * @file: An i915 perf stream file * @wait: poll() state table @@ -2457,15 +3002,14 @@ static enum hrtimer_restart oa_poll_check_timer_cb(struct hrtimer *hrtimer) * &i915_perf_stream_ops->poll_wait to call 
poll_wait() with a wait queue that * will be woken for new stream data. * - * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize + * Note: The &perf->lock mutex has been taken to serialize * with any non-file-operation driver hooks. * * Returns: any poll events that are ready without sleeping */ -static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv, - struct i915_perf_stream *stream, - struct file *file, - poll_table *wait) +static __poll_t i915_perf_poll_locked(struct i915_perf_stream *stream, + struct file *file, + poll_table *wait) { __poll_t events = 0; @@ -2499,12 +3043,12 @@ static __poll_t i915_perf_poll_locked(struct drm_i915_private *dev_priv, static __poll_t i915_perf_poll(struct file *file, poll_table *wait) { struct i915_perf_stream *stream = file->private_data; - struct drm_i915_private *dev_priv = stream->dev_priv; + struct i915_perf *perf = stream->perf; __poll_t ret; - mutex_lock(&dev_priv->perf.lock); - ret = i915_perf_poll_locked(dev_priv, stream, file, wait); - mutex_unlock(&dev_priv->perf.lock); + mutex_lock(&perf->lock); + ret = i915_perf_poll_locked(stream, file, wait); + mutex_unlock(&perf->lock); return ret; } @@ -2529,6 +3073,9 @@ static void i915_perf_enable_locked(struct i915_perf_stream *stream) if (stream->ops->enable) stream->ops->enable(stream); + + if (stream->hold_preemption) + i915_gem_context_set_nopreempt(stream->ctx); } /** @@ -2553,17 +3100,54 @@ static void i915_perf_disable_locked(struct i915_perf_stream *stream) /* Allow stream->ops->disable() to refer to this */ stream->enabled = false; + if (stream->hold_preemption) + i915_gem_context_clear_nopreempt(stream->ctx); + if (stream->ops->disable) stream->ops->disable(stream); } +static long i915_perf_config_locked(struct i915_perf_stream *stream, + unsigned long metrics_set) +{ + struct i915_oa_config *config; + long ret = stream->oa_config->id; + + config = i915_perf_get_oa_config(stream->perf, metrics_set); + if (!config) + return -EINVAL; + + if (config != stream->oa_config) { + int err; + + /* + * If OA is bound to a specific context, emit the + * reconfiguration inline from that context. The update + * will then be ordered with respect to submission on that + * context. + * + * When set globally, we use a low priority kernel context, + * so it will effectively take effect when idle. + */ + err = emit_oa_config(stream, config, oa_context(stream)); + if (err == 0) + config = xchg(&stream->oa_config, config); + else + ret = err; + } + + i915_oa_config_put(config); + + return ret; +} + /** * i915_perf_ioctl - support ioctl() usage with i915 perf stream FDs * @stream: An i915 perf stream * @cmd: the ioctl request * @arg: the ioctl data * - * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize + * Note: The &perf->lock mutex has been taken to serialize * with any non-file-operation driver hooks. * * Returns: zero on success or a negative error code. 
Returns -EINVAL for @@ -2580,6 +3164,8 @@ static long i915_perf_ioctl_locked(struct i915_perf_stream *stream, case I915_PERF_IOCTL_DISABLE: i915_perf_disable_locked(stream); return 0; + case I915_PERF_IOCTL_CONFIG: + return i915_perf_config_locked(stream, arg); } return -EINVAL; @@ -2601,12 +3187,12 @@ static long i915_perf_ioctl(struct file *file, unsigned long arg) { struct i915_perf_stream *stream = file->private_data; - struct drm_i915_private *dev_priv = stream->dev_priv; + struct i915_perf *perf = stream->perf; long ret; - mutex_lock(&dev_priv->perf.lock); + mutex_lock(&perf->lock); ret = i915_perf_ioctl_locked(stream, cmd, arg); - mutex_unlock(&dev_priv->perf.lock); + mutex_unlock(&perf->lock); return ret; } @@ -2618,7 +3204,7 @@ static long i915_perf_ioctl(struct file *file, * Frees all resources associated with the given i915 perf @stream, disabling * any associated data capture in the process. * - * Note: The &drm_i915_private->perf.lock mutex has been taken to serialize + * Note: The &perf->lock mutex has been taken to serialize * with any non-file-operation driver hooks. */ static void i915_perf_destroy_locked(struct i915_perf_stream *stream) @@ -2629,8 +3215,6 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream) if (stream->ops->destroy) stream->ops->destroy(stream); - list_del(&stream->link); - if (stream->ctx) i915_gem_context_put(stream->ctx); @@ -2651,14 +3235,14 @@ static void i915_perf_destroy_locked(struct i915_perf_stream *stream) static int i915_perf_release(struct inode *inode, struct file *file) { struct i915_perf_stream *stream = file->private_data; - struct drm_i915_private *dev_priv = stream->dev_priv; + struct i915_perf *perf = stream->perf; - mutex_lock(&dev_priv->perf.lock); + mutex_lock(&perf->lock); i915_perf_destroy_locked(stream); - mutex_unlock(&dev_priv->perf.lock); + mutex_unlock(&perf->lock); /* Release the reference the perf stream kept on the driver. */ - drm_dev_put(&dev_priv->drm); + drm_dev_put(&perf->i915->drm); return 0; } @@ -2680,7 +3264,7 @@ static const struct file_operations fops = { /** * i915_perf_open_ioctl_locked - DRM ioctl() for userspace to open a stream FD - * @dev_priv: i915 device instance + * @perf: i915 perf instance * @param: The open parameters passed to 'DRM_I915_PERF_OPEN` * @props: individually validated u64 property value pairs * @file: drm file @@ -2688,7 +3272,7 @@ static const struct file_operations fops = { * See i915_perf_ioctl_open() for interface details. * * Implements further stream config validation and stream initialization on - * behalf of i915_perf_open_ioctl() with the &drm_i915_private->perf.lock mutex + * behalf of i915_perf_open_ioctl() with the &perf->lock mutex * taken to serialize with any non-file-operation driver hooks. * * Note: at this point the @props have only been validated in isolation and @@ -2703,7 +3287,7 @@ static const struct file_operations fops = { * Returns: zero on success or a negative error code. */ static int -i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, +i915_perf_open_ioctl_locked(struct i915_perf *perf, struct drm_i915_perf_open_param *param, struct perf_open_properties *props, struct drm_file *file) @@ -2734,17 +3318,34 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, * rest of the system, which we consider acceptable for a * non-privileged client. 
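 * (Note that DRM_I915_PERF_PROP_HOLD_PREEMPTION, handled further below,
 * always forces the privileged check regardless of generation.)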
* - * For Gen8+ the OA unit no longer supports clock gating off for a + * For Gen8->11 the OA unit no longer supports clock gating off for a * specific context and the kernel can't securely stop the counters * from updating as system-wide / global values. Even though we can * filter reports based on the included context ID we can't block * clients from seeing the raw / global counter values via * MI_REPORT_PERF_COUNT commands and so consider it a privileged op to * enable the OA unit by default. + * + * For Gen12+ we gain a new OAR unit that only monitors the RCS on a + * per context basis. So we can relax requirements there if the user + * doesn't request global stream access (i.e. query based sampling + * using MI_RECORD_PERF_COUNT. */ - if (IS_HASWELL(dev_priv) && specific_ctx) + if (IS_HASWELL(perf->i915) && specific_ctx) + privileged_op = false; + else if (IS_GEN(perf->i915, 12) && specific_ctx && + (props->sample_flags & SAMPLE_OA_REPORT) == 0) privileged_op = false; + if (props->hold_preemption) { + if (!props->single_context) { + DRM_DEBUG("preemption disable with no context\n"); + ret = -EINVAL; + goto err; + } + privileged_op = true; + } + /* Similar to perf's kernel.perf_paranoid_cpu sysctl option * we check a dev.i915.perf_stream_paranoid sysctl option * to determine if it's ok to access system wide OA counters @@ -2752,7 +3353,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, */ if (privileged_op && i915_perf_stream_paranoid && !capable(CAP_SYS_ADMIN)) { - DRM_DEBUG("Insufficient privileges to open system-wide i915 perf stream\n"); + DRM_DEBUG("Insufficient privileges to open i915 perf stream\n"); ret = -EACCES; goto err_ctx; } @@ -2763,7 +3364,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, goto err_ctx; } - stream->dev_priv = dev_priv; + stream->perf = perf; stream->ctx = specific_ctx; ret = i915_oa_stream_init(stream, param, props); @@ -2779,8 +3380,6 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, goto err_flags; } - list_add(&stream->link, &dev_priv->perf.streams); - if (param->flags & I915_PERF_FLAG_FD_CLOEXEC) f_flags |= O_CLOEXEC; if (param->flags & I915_PERF_FLAG_FD_NONBLOCK) @@ -2789,7 +3388,7 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, stream_fd = anon_inode_getfd("[i915_perf]", &fops, stream, f_flags); if (stream_fd < 0) { ret = stream_fd; - goto err_open; + goto err_flags; } if (!(param->flags & I915_PERF_FLAG_DISABLED)) @@ -2798,12 +3397,10 @@ i915_perf_open_ioctl_locked(struct drm_i915_private *dev_priv, /* Take a reference on the driver that will be kept with stream_fd * until its release. 
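 * The reference is dropped again by drm_dev_put() in i915_perf_release().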
*/ - drm_dev_get(&dev_priv->drm); + drm_dev_get(&perf->i915->drm); return stream_fd; -err_open: - list_del(&stream->link); err_flags: if (stream->ops->destroy) stream->ops->destroy(stream); @@ -2816,15 +3413,15 @@ err: return ret; } -static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent) +static u64 oa_exponent_to_ns(struct i915_perf *perf, int exponent) { return div64_u64(1000000000ULL * (2ULL << exponent), - 1000ULL * RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz); + 1000ULL * RUNTIME_INFO(perf->i915)->cs_timestamp_frequency_khz); } /** * read_properties_unlocked - validate + copy userspace stream open properties - * @dev_priv: i915 device instance + * @perf: i915 perf instance * @uprops: The array of u64 key value pairs given by userspace * @n_props: The number of key value pairs expected in @uprops * @props: The stream configuration built up while validating properties @@ -2837,7 +3434,7 @@ static u64 oa_exponent_to_ns(struct drm_i915_private *dev_priv, int exponent) * we shouldn't validate or assume anything about ordering here. This doesn't * rule out defining new properties with ordering requirements in the future. */ -static int read_properties_unlocked(struct drm_i915_private *dev_priv, +static int read_properties_unlocked(struct i915_perf *perf, u64 __user *uprops, u32 n_props, struct perf_open_properties *props) @@ -2852,6 +3449,15 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, return -EINVAL; } + /* At the moment we only support using i915-perf on the RCS. */ + props->engine = intel_engine_lookup_user(perf->i915, + I915_ENGINE_CLASS_RENDER, + 0); + if (!props->engine) { + DRM_DEBUG("No RENDER-capable engines\n"); + return -EINVAL; + } + /* Considering that ID = 0 is reserved and assuming that we don't * (currently) expect any configurations to ever specify duplicate * values for a particular property ID then the last _PROP_MAX value is @@ -2903,7 +3509,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, value); return -EINVAL; } - if (!dev_priv->perf.oa_formats[value].size) { + if (!perf->oa_formats[value].size) { DRM_DEBUG("Unsupported OA report format %llu\n", value); return -EINVAL; @@ -2924,7 +3530,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, */ BUILD_BUG_ON(sizeof(oa_period) != 8); - oa_period = oa_exponent_to_ns(dev_priv, value); + oa_period = oa_exponent_to_ns(perf, value); /* This check is primarily to ensure that oa_period <= * UINT32_MAX (before passing to do_div which only @@ -2949,6 +3555,9 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, props->oa_periodic = true; props->oa_period_exponent = value; break; + case DRM_I915_PERF_PROP_HOLD_PREEMPTION: + props->hold_preemption = !!value; + break; case DRM_I915_PERF_PROP_MAX: MISSING_CASE(id); return -EINVAL; @@ -2978,7 +3587,7 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, * mutex to avoid an awkward lockdep with mmap_sem. * * Most of the implementation details are handled by - * i915_perf_open_ioctl_locked() after taking the &drm_i915_private->perf.lock + * i915_perf_open_ioctl_locked() after taking the &perf->lock * mutex for serializing with any non-file-operation driver hooks. 
* * Return: A newly opened i915 Perf stream file descriptor or negative @@ -2987,13 +3596,13 @@ static int read_properties_unlocked(struct drm_i915_private *dev_priv, int i915_perf_open_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_perf *perf = &to_i915(dev)->perf; struct drm_i915_perf_open_param *param = data; struct perf_open_properties props; u32 known_open_flags; int ret; - if (!dev_priv->perf.initialized) { + if (!perf->i915) { DRM_DEBUG("i915 perf interface not available for this system\n"); return -ENOTSUPP; } @@ -3006,124 +3615,130 @@ int i915_perf_open_ioctl(struct drm_device *dev, void *data, return -EINVAL; } - ret = read_properties_unlocked(dev_priv, + ret = read_properties_unlocked(perf, u64_to_user_ptr(param->properties_ptr), param->num_properties, &props); if (ret) return ret; - mutex_lock(&dev_priv->perf.lock); - ret = i915_perf_open_ioctl_locked(dev_priv, param, &props, file); - mutex_unlock(&dev_priv->perf.lock); + mutex_lock(&perf->lock); + ret = i915_perf_open_ioctl_locked(perf, param, &props, file); + mutex_unlock(&perf->lock); return ret; } /** * i915_perf_register - exposes i915-perf to userspace - * @dev_priv: i915 device instance + * @i915: i915 device instance * * In particular OA metric sets are advertised under a sysfs metrics/ * directory allowing userspace to enumerate valid IDs that can be * used to open an i915-perf stream. */ -void i915_perf_register(struct drm_i915_private *dev_priv) +void i915_perf_register(struct drm_i915_private *i915) { + struct i915_perf *perf = &i915->perf; int ret; - if (!dev_priv->perf.initialized) + if (!perf->i915) return; /* To be sure we're synchronized with an attempted * i915_perf_open_ioctl(); considering that we register after * being exposed to userspace. 
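 * That is why perf->lock is held across the sysfs registration below.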
*/ - mutex_lock(&dev_priv->perf.lock); + mutex_lock(&perf->lock); - dev_priv->perf.metrics_kobj = + perf->metrics_kobj = kobject_create_and_add("metrics", - &dev_priv->drm.primary->kdev->kobj); - if (!dev_priv->perf.metrics_kobj) + &i915->drm.primary->kdev->kobj); + if (!perf->metrics_kobj) goto exit; - sysfs_attr_init(&dev_priv->perf.test_config.sysfs_metric_id.attr); - - if (INTEL_GEN(dev_priv) >= 11) { - i915_perf_load_test_config_icl(dev_priv); - } else if (IS_CANNONLAKE(dev_priv)) { - i915_perf_load_test_config_cnl(dev_priv); - } else if (IS_COFFEELAKE(dev_priv)) { - if (IS_CFL_GT2(dev_priv)) - i915_perf_load_test_config_cflgt2(dev_priv); - if (IS_CFL_GT3(dev_priv)) - i915_perf_load_test_config_cflgt3(dev_priv); - } else if (IS_GEMINILAKE(dev_priv)) { - i915_perf_load_test_config_glk(dev_priv); - } else if (IS_KABYLAKE(dev_priv)) { - if (IS_KBL_GT2(dev_priv)) - i915_perf_load_test_config_kblgt2(dev_priv); - else if (IS_KBL_GT3(dev_priv)) - i915_perf_load_test_config_kblgt3(dev_priv); - } else if (IS_BROXTON(dev_priv)) { - i915_perf_load_test_config_bxt(dev_priv); - } else if (IS_SKYLAKE(dev_priv)) { - if (IS_SKL_GT2(dev_priv)) - i915_perf_load_test_config_sklgt2(dev_priv); - else if (IS_SKL_GT3(dev_priv)) - i915_perf_load_test_config_sklgt3(dev_priv); - else if (IS_SKL_GT4(dev_priv)) - i915_perf_load_test_config_sklgt4(dev_priv); - } else if (IS_CHERRYVIEW(dev_priv)) { - i915_perf_load_test_config_chv(dev_priv); - } else if (IS_BROADWELL(dev_priv)) { - i915_perf_load_test_config_bdw(dev_priv); - } else if (IS_HASWELL(dev_priv)) { - i915_perf_load_test_config_hsw(dev_priv); -} - - if (dev_priv->perf.test_config.id == 0) + sysfs_attr_init(&perf->test_config.sysfs_metric_id.attr); + + if (IS_TIGERLAKE(i915)) { + i915_perf_load_test_config_tgl(i915); + } else if (INTEL_GEN(i915) >= 11) { + i915_perf_load_test_config_icl(i915); + } else if (IS_CANNONLAKE(i915)) { + i915_perf_load_test_config_cnl(i915); + } else if (IS_COFFEELAKE(i915)) { + if (IS_CFL_GT2(i915)) + i915_perf_load_test_config_cflgt2(i915); + if (IS_CFL_GT3(i915)) + i915_perf_load_test_config_cflgt3(i915); + } else if (IS_GEMINILAKE(i915)) { + i915_perf_load_test_config_glk(i915); + } else if (IS_KABYLAKE(i915)) { + if (IS_KBL_GT2(i915)) + i915_perf_load_test_config_kblgt2(i915); + else if (IS_KBL_GT3(i915)) + i915_perf_load_test_config_kblgt3(i915); + } else if (IS_BROXTON(i915)) { + i915_perf_load_test_config_bxt(i915); + } else if (IS_SKYLAKE(i915)) { + if (IS_SKL_GT2(i915)) + i915_perf_load_test_config_sklgt2(i915); + else if (IS_SKL_GT3(i915)) + i915_perf_load_test_config_sklgt3(i915); + else if (IS_SKL_GT4(i915)) + i915_perf_load_test_config_sklgt4(i915); + } else if (IS_CHERRYVIEW(i915)) { + i915_perf_load_test_config_chv(i915); + } else if (IS_BROADWELL(i915)) { + i915_perf_load_test_config_bdw(i915); + } else if (IS_HASWELL(i915)) { + i915_perf_load_test_config_hsw(i915); + } + + if (perf->test_config.id == 0) goto sysfs_error; - ret = sysfs_create_group(dev_priv->perf.metrics_kobj, - &dev_priv->perf.test_config.sysfs_metric); + ret = sysfs_create_group(perf->metrics_kobj, + &perf->test_config.sysfs_metric); if (ret) goto sysfs_error; - atomic_set(&dev_priv->perf.test_config.ref_count, 1); + perf->test_config.perf = perf; + kref_init(&perf->test_config.ref); goto exit; sysfs_error: - kobject_put(dev_priv->perf.metrics_kobj); - dev_priv->perf.metrics_kobj = NULL; + kobject_put(perf->metrics_kobj); + perf->metrics_kobj = NULL; exit: - mutex_unlock(&dev_priv->perf.lock); + mutex_unlock(&perf->lock); } /** * 
i915_perf_unregister - hide i915-perf from userspace - * @dev_priv: i915 device instance + * @i915: i915 device instance * * i915-perf state cleanup is split up into an 'unregister' and * 'deinit' phase where the interface is first hidden from * userspace by i915_perf_unregister() before cleaning up * remaining state in i915_perf_fini(). */ -void i915_perf_unregister(struct drm_i915_private *dev_priv) +void i915_perf_unregister(struct drm_i915_private *i915) { - if (!dev_priv->perf.metrics_kobj) + struct i915_perf *perf = &i915->perf; + + if (!perf->metrics_kobj) return; - sysfs_remove_group(dev_priv->perf.metrics_kobj, - &dev_priv->perf.test_config.sysfs_metric); + sysfs_remove_group(perf->metrics_kobj, + &perf->test_config.sysfs_metric); - kobject_put(dev_priv->perf.metrics_kobj); - dev_priv->perf.metrics_kobj = NULL; + kobject_put(perf->metrics_kobj); + perf->metrics_kobj = NULL; } -static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr) +static bool gen8_is_valid_flex_addr(struct i915_perf *perf, u32 addr) { static const i915_reg_t flex_eu_regs[] = { EU_PERF_CNTL0, @@ -3143,56 +3758,80 @@ static bool gen8_is_valid_flex_addr(struct drm_i915_private *dev_priv, u32 addr) return false; } -static bool gen7_is_valid_b_counter_addr(struct drm_i915_private *dev_priv, u32 addr) +#define ADDR_IN_RANGE(addr, start, end) \ + ((addr) >= (start) && \ + (addr) <= (end)) + +#define REG_IN_RANGE(addr, start, end) \ + ((addr) >= i915_mmio_reg_offset(start) && \ + (addr) <= i915_mmio_reg_offset(end)) + +#define REG_EQUAL(addr, mmio) \ + ((addr) == i915_mmio_reg_offset(mmio)) + +static bool gen7_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) +{ + return REG_IN_RANGE(addr, OASTARTTRIG1, OASTARTTRIG8) || + REG_IN_RANGE(addr, OAREPORTTRIG1, OAREPORTTRIG8) || + REG_IN_RANGE(addr, OACEC0_0, OACEC7_1); +} + +static bool gen7_is_valid_mux_addr(struct i915_perf *perf, u32 addr) +{ + return REG_EQUAL(addr, HALF_SLICE_CHICKEN2) || + REG_IN_RANGE(addr, MICRO_BP0_0, NOA_WRITE) || + REG_IN_RANGE(addr, OA_PERFCNT1_LO, OA_PERFCNT2_HI) || + REG_IN_RANGE(addr, OA_PERFMATRIX_LO, OA_PERFMATRIX_HI); +} + +static bool gen8_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return (addr >= i915_mmio_reg_offset(OASTARTTRIG1) && - addr <= i915_mmio_reg_offset(OASTARTTRIG8)) || - (addr >= i915_mmio_reg_offset(OAREPORTTRIG1) && - addr <= i915_mmio_reg_offset(OAREPORTTRIG8)) || - (addr >= i915_mmio_reg_offset(OACEC0_0) && - addr <= i915_mmio_reg_offset(OACEC7_1)); + return gen7_is_valid_mux_addr(perf, addr) || + REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) || + REG_IN_RANGE(addr, RPM_CONFIG0, NOA_CONFIG(8)); } -static bool gen7_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) +static bool gen10_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return addr == i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) || - (addr >= i915_mmio_reg_offset(MICRO_BP0_0) && - addr <= i915_mmio_reg_offset(NOA_WRITE)) || - (addr >= i915_mmio_reg_offset(OA_PERFCNT1_LO) && - addr <= i915_mmio_reg_offset(OA_PERFCNT2_HI)) || - (addr >= i915_mmio_reg_offset(OA_PERFMATRIX_LO) && - addr <= i915_mmio_reg_offset(OA_PERFMATRIX_HI)); + return gen8_is_valid_mux_addr(perf, addr) || + REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) || + REG_IN_RANGE(addr, OA_PERFCNT3_LO, OA_PERFCNT4_HI); } -static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) +static bool hsw_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return gen7_is_valid_mux_addr(dev_priv, addr) || - addr == 
i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) || - (addr >= i915_mmio_reg_offset(RPM_CONFIG0) && - addr <= i915_mmio_reg_offset(NOA_CONFIG(8))); + return gen7_is_valid_mux_addr(perf, addr) || + ADDR_IN_RANGE(addr, 0x25100, 0x2FF90) || + REG_IN_RANGE(addr, HSW_MBVID2_NOA0, HSW_MBVID2_NOA9) || + REG_EQUAL(addr, HSW_MBVID2_MISR0); } -static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) +static bool chv_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return gen8_is_valid_mux_addr(dev_priv, addr) || - addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) || - (addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) && - addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI)); + return gen7_is_valid_mux_addr(perf, addr) || + ADDR_IN_RANGE(addr, 0x182300, 0x1823A4); } -static bool hsw_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) +static bool gen12_is_valid_b_counter_addr(struct i915_perf *perf, u32 addr) { - return gen7_is_valid_mux_addr(dev_priv, addr) || - (addr >= 0x25100 && addr <= 0x2FF90) || - (addr >= i915_mmio_reg_offset(HSW_MBVID2_NOA0) && - addr <= i915_mmio_reg_offset(HSW_MBVID2_NOA9)) || - addr == i915_mmio_reg_offset(HSW_MBVID2_MISR0); + return REG_IN_RANGE(addr, GEN12_OAG_OASTARTTRIG1, GEN12_OAG_OASTARTTRIG8) || + REG_IN_RANGE(addr, GEN12_OAG_OAREPORTTRIG1, GEN12_OAG_OAREPORTTRIG8) || + REG_IN_RANGE(addr, GEN12_OAG_CEC0_0, GEN12_OAG_CEC7_1) || + REG_IN_RANGE(addr, GEN12_OAG_SCEC0_0, GEN12_OAG_SCEC7_1) || + REG_EQUAL(addr, GEN12_OAA_DBG_REG) || + REG_EQUAL(addr, GEN12_OAG_OA_PESS) || + REG_EQUAL(addr, GEN12_OAG_SPCTR_CNF); } -static bool chv_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr) +static bool gen12_is_valid_mux_addr(struct i915_perf *perf, u32 addr) { - return gen7_is_valid_mux_addr(dev_priv, addr) || - (addr >= 0x182300 && addr <= 0x1823A4); + return REG_EQUAL(addr, NOA_WRITE) || + REG_EQUAL(addr, GEN10_NOA_WRITE_HIGH) || + REG_EQUAL(addr, GDT_CHICKEN_BITS) || + REG_EQUAL(addr, WAIT_FOR_RC6_EXIT) || + REG_EQUAL(addr, RPM_CONFIG0) || + REG_EQUAL(addr, RPM_CONFIG1) || + REG_IN_RANGE(addr, NOA_CONFIG(0), NOA_CONFIG(8)); } static u32 mask_reg_value(u32 reg, u32 val) @@ -3201,21 +3840,21 @@ static u32 mask_reg_value(u32 reg, u32 val) * WaDisableSTUnitPowerOptimization workaround. Make sure the value * programmed by userspace doesn't change this. */ - if (i915_mmio_reg_offset(HALF_SLICE_CHICKEN2) == reg) + if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2)) val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE); /* WAIT_FOR_RC6_EXIT has only one bit fullfilling the function * indicated by its name and a bunch of selection fields used by OA * configs. 
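 * Only the selection fields may be programmed from userspace; the enable bit
 * itself is masked out below.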
*/ - if (i915_mmio_reg_offset(WAIT_FOR_RC6_EXIT) == reg) + if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT)) val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE); return val; } -static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv, - bool (*is_valid)(struct drm_i915_private *dev_priv, u32 addr), +static struct i915_oa_reg *alloc_oa_regs(struct i915_perf *perf, + bool (*is_valid)(struct i915_perf *perf, u32 addr), u32 __user *regs, u32 n_regs) { @@ -3245,7 +3884,7 @@ static struct i915_oa_reg *alloc_oa_regs(struct drm_i915_private *dev_priv, if (err) goto addr_err; - if (!is_valid(dev_priv, addr)) { + if (!is_valid(perf, addr)) { DRM_DEBUG("Invalid oa_reg address: %X\n", addr); err = -EINVAL; goto addr_err; @@ -3278,7 +3917,7 @@ static ssize_t show_dynamic_id(struct device *dev, return sprintf(buf, "%d\n", oa_config->id); } -static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv, +static int create_dynamic_oa_sysfs_entry(struct i915_perf *perf, struct i915_oa_config *oa_config) { sysfs_attr_init(&oa_config->sysfs_metric_id.attr); @@ -3293,7 +3932,7 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv, oa_config->sysfs_metric.name = oa_config->uuid; oa_config->sysfs_metric.attrs = oa_config->attrs; - return sysfs_create_group(dev_priv->perf.metrics_kobj, + return sysfs_create_group(perf->metrics_kobj, &oa_config->sysfs_metric); } @@ -3313,17 +3952,18 @@ static int create_dynamic_oa_sysfs_entry(struct drm_i915_private *dev_priv, int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_perf *perf = &to_i915(dev)->perf; struct drm_i915_perf_oa_config *args = data; struct i915_oa_config *oa_config, *tmp; + static struct i915_oa_reg *regs; int err, id; - if (!dev_priv->perf.initialized) { + if (!perf->i915) { DRM_DEBUG("i915 perf interface not available for this system\n"); return -ENOTSUPP; } - if (!dev_priv->perf.metrics_kobj) { + if (!perf->metrics_kobj) { DRM_DEBUG("OA metrics weren't advertised via sysfs\n"); return -EINVAL; } @@ -3346,7 +3986,8 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, return -ENOMEM; } - atomic_set(&oa_config->ref_count, 1); + oa_config->perf = perf; + kref_init(&oa_config->ref); if (!uuid_is_valid(args->uuid)) { DRM_DEBUG("Invalid uuid format for OA config\n"); @@ -3360,59 +4001,59 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, memcpy(oa_config->uuid, args->uuid, sizeof(args->uuid)); oa_config->mux_regs_len = args->n_mux_regs; - oa_config->mux_regs = - alloc_oa_regs(dev_priv, - dev_priv->perf.ops.is_valid_mux_reg, - u64_to_user_ptr(args->mux_regs_ptr), - args->n_mux_regs); + regs = alloc_oa_regs(perf, + perf->ops.is_valid_mux_reg, + u64_to_user_ptr(args->mux_regs_ptr), + args->n_mux_regs); - if (IS_ERR(oa_config->mux_regs)) { + if (IS_ERR(regs)) { DRM_DEBUG("Failed to create OA config for mux_regs\n"); - err = PTR_ERR(oa_config->mux_regs); + err = PTR_ERR(regs); goto reg_err; } + oa_config->mux_regs = regs; oa_config->b_counter_regs_len = args->n_boolean_regs; - oa_config->b_counter_regs = - alloc_oa_regs(dev_priv, - dev_priv->perf.ops.is_valid_b_counter_reg, - u64_to_user_ptr(args->boolean_regs_ptr), - args->n_boolean_regs); + regs = alloc_oa_regs(perf, + perf->ops.is_valid_b_counter_reg, + u64_to_user_ptr(args->boolean_regs_ptr), + args->n_boolean_regs); - if (IS_ERR(oa_config->b_counter_regs)) { + if (IS_ERR(regs)) { DRM_DEBUG("Failed to create OA 
config for b_counter_regs\n"); - err = PTR_ERR(oa_config->b_counter_regs); + err = PTR_ERR(regs); goto reg_err; } + oa_config->b_counter_regs = regs; - if (INTEL_GEN(dev_priv) < 8) { + if (INTEL_GEN(perf->i915) < 8) { if (args->n_flex_regs != 0) { err = -EINVAL; goto reg_err; } } else { oa_config->flex_regs_len = args->n_flex_regs; - oa_config->flex_regs = - alloc_oa_regs(dev_priv, - dev_priv->perf.ops.is_valid_flex_reg, - u64_to_user_ptr(args->flex_regs_ptr), - args->n_flex_regs); + regs = alloc_oa_regs(perf, + perf->ops.is_valid_flex_reg, + u64_to_user_ptr(args->flex_regs_ptr), + args->n_flex_regs); - if (IS_ERR(oa_config->flex_regs)) { + if (IS_ERR(regs)) { DRM_DEBUG("Failed to create OA config for flex_regs\n"); - err = PTR_ERR(oa_config->flex_regs); + err = PTR_ERR(regs); goto reg_err; } + oa_config->flex_regs = regs; } - err = mutex_lock_interruptible(&dev_priv->perf.metrics_lock); + err = mutex_lock_interruptible(&perf->metrics_lock); if (err) goto reg_err; /* We shouldn't have too many configs, so this iteration shouldn't be * too costly. */ - idr_for_each_entry(&dev_priv->perf.metrics_idr, tmp, id) { + idr_for_each_entry(&perf->metrics_idr, tmp, id) { if (!strcmp(tmp->uuid, oa_config->uuid)) { DRM_DEBUG("OA config already exists with this uuid\n"); err = -EADDRINUSE; @@ -3420,14 +4061,14 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, } } - err = create_dynamic_oa_sysfs_entry(dev_priv, oa_config); + err = create_dynamic_oa_sysfs_entry(perf, oa_config); if (err) { DRM_DEBUG("Failed to create sysfs entry for OA config\n"); goto sysfs_err; } /* Config id 0 is invalid, id 1 for kernel stored test config. */ - oa_config->id = idr_alloc(&dev_priv->perf.metrics_idr, + oa_config->id = idr_alloc(&perf->metrics_idr, oa_config, 2, 0, GFP_KERNEL); if (oa_config->id < 0) { @@ -3436,16 +4077,16 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, goto sysfs_err; } - mutex_unlock(&dev_priv->perf.metrics_lock); + mutex_unlock(&perf->metrics_lock); DRM_DEBUG("Added config %s id=%i\n", oa_config->uuid, oa_config->id); return oa_config->id; sysfs_err: - mutex_unlock(&dev_priv->perf.metrics_lock); + mutex_unlock(&perf->metrics_lock); reg_err: - put_oa_config(dev_priv, oa_config); + i915_oa_config_put(oa_config); DRM_DEBUG("Failed to add new OA config\n"); return err; } @@ -3464,12 +4105,12 @@ reg_err: int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { - struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_perf *perf = &to_i915(dev)->perf; u64 *arg = data; struct i915_oa_config *oa_config; int ret; - if (!dev_priv->perf.initialized) { + if (!perf->i915) { DRM_DEBUG("i915 perf interface not available for this system\n"); return -ENOTSUPP; } @@ -3479,31 +4120,33 @@ int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, return -EACCES; } - ret = mutex_lock_interruptible(&dev_priv->perf.metrics_lock); + ret = mutex_lock_interruptible(&perf->metrics_lock); if (ret) - goto lock_err; + return ret; - oa_config = idr_find(&dev_priv->perf.metrics_idr, *arg); + oa_config = idr_find(&perf->metrics_idr, *arg); if (!oa_config) { DRM_DEBUG("Failed to remove unknown OA config\n"); ret = -ENOENT; - goto config_err; + goto err_unlock; } GEM_BUG_ON(*arg != oa_config->id); - sysfs_remove_group(dev_priv->perf.metrics_kobj, - &oa_config->sysfs_metric); + sysfs_remove_group(perf->metrics_kobj, &oa_config->sysfs_metric); + + idr_remove(&perf->metrics_idr, *arg); - idr_remove(&dev_priv->perf.metrics_idr, *arg); + 
mutex_unlock(&perf->metrics_lock); DRM_DEBUG("Removed config %s id=%i\n", oa_config->uuid, oa_config->id); - put_oa_config(dev_priv, oa_config); + i915_oa_config_put(oa_config); -config_err: - mutex_unlock(&dev_priv->perf.metrics_lock); -lock_err: + return 0; + +err_unlock: + mutex_unlock(&perf->metrics_lock); return ret; } @@ -3551,103 +4194,126 @@ static struct ctl_table dev_root[] = { /** * i915_perf_init - initialize i915-perf state on module load - * @dev_priv: i915 device instance + * @i915: i915 device instance * * Initializes i915-perf state without exposing anything to userspace. * * Note: i915-perf initialization is split into an 'init' and 'register' * phase with the i915_perf_register() exposing state to userspace. */ -void i915_perf_init(struct drm_i915_private *dev_priv) -{ - if (IS_HASWELL(dev_priv)) { - dev_priv->perf.ops.is_valid_b_counter_reg = - gen7_is_valid_b_counter_addr; - dev_priv->perf.ops.is_valid_mux_reg = - hsw_is_valid_mux_addr; - dev_priv->perf.ops.is_valid_flex_reg = NULL; - dev_priv->perf.ops.enable_metric_set = hsw_enable_metric_set; - dev_priv->perf.ops.disable_metric_set = hsw_disable_metric_set; - dev_priv->perf.ops.oa_enable = gen7_oa_enable; - dev_priv->perf.ops.oa_disable = gen7_oa_disable; - dev_priv->perf.ops.read = gen7_oa_read; - dev_priv->perf.ops.oa_hw_tail_read = - gen7_oa_hw_tail_read; - - dev_priv->perf.oa_formats = hsw_oa_formats; - } else if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) { +void i915_perf_init(struct drm_i915_private *i915) +{ + struct i915_perf *perf = &i915->perf; + + /* XXX const struct i915_perf_ops! */ + + if (IS_HASWELL(i915)) { + perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; + perf->ops.is_valid_mux_reg = hsw_is_valid_mux_addr; + perf->ops.is_valid_flex_reg = NULL; + perf->ops.enable_metric_set = hsw_enable_metric_set; + perf->ops.disable_metric_set = hsw_disable_metric_set; + perf->ops.oa_enable = gen7_oa_enable; + perf->ops.oa_disable = gen7_oa_disable; + perf->ops.read = gen7_oa_read; + perf->ops.oa_hw_tail_read = gen7_oa_hw_tail_read; + + perf->oa_formats = hsw_oa_formats; + } else if (HAS_LOGICAL_RING_CONTEXTS(i915)) { /* Note: that although we could theoretically also support the * legacy ringbuffer mode on BDW (and earlier iterations of * this driver, before upstreaming did this) it didn't seem * worth the complexity to maintain now that BDW+ enable * execlist mode by default. 
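 * All of the Gen8+ branches below therefore program the per-context OA state
 * through the logical ring context image.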
*/ - dev_priv->perf.oa_formats = gen8_plus_oa_formats; + perf->ops.read = gen8_oa_read; - dev_priv->perf.ops.oa_enable = gen8_oa_enable; - dev_priv->perf.ops.oa_disable = gen8_oa_disable; - dev_priv->perf.ops.read = gen8_oa_read; - dev_priv->perf.ops.oa_hw_tail_read = gen8_oa_hw_tail_read; + if (IS_GEN_RANGE(i915, 8, 9)) { + perf->oa_formats = gen8_plus_oa_formats; - if (IS_GEN_RANGE(dev_priv, 8, 9)) { - dev_priv->perf.ops.is_valid_b_counter_reg = + perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; - dev_priv->perf.ops.is_valid_mux_reg = + perf->ops.is_valid_mux_reg = gen8_is_valid_mux_addr; - dev_priv->perf.ops.is_valid_flex_reg = + perf->ops.is_valid_flex_reg = gen8_is_valid_flex_addr; - if (IS_CHERRYVIEW(dev_priv)) { - dev_priv->perf.ops.is_valid_mux_reg = + if (IS_CHERRYVIEW(i915)) { + perf->ops.is_valid_mux_reg = chv_is_valid_mux_addr; } - dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set; - dev_priv->perf.ops.disable_metric_set = gen8_disable_metric_set; + perf->ops.oa_enable = gen8_oa_enable; + perf->ops.oa_disable = gen8_oa_disable; + perf->ops.enable_metric_set = gen8_enable_metric_set; + perf->ops.disable_metric_set = gen8_disable_metric_set; + perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read; - if (IS_GEN(dev_priv, 8)) { - dev_priv->perf.ctx_oactxctrl_offset = 0x120; - dev_priv->perf.ctx_flexeu0_offset = 0x2ce; + if (IS_GEN(i915, 8)) { + perf->ctx_oactxctrl_offset = 0x120; + perf->ctx_flexeu0_offset = 0x2ce; - dev_priv->perf.gen8_valid_ctx_bit = BIT(25); + perf->gen8_valid_ctx_bit = BIT(25); } else { - dev_priv->perf.ctx_oactxctrl_offset = 0x128; - dev_priv->perf.ctx_flexeu0_offset = 0x3de; + perf->ctx_oactxctrl_offset = 0x128; + perf->ctx_flexeu0_offset = 0x3de; - dev_priv->perf.gen8_valid_ctx_bit = BIT(16); + perf->gen8_valid_ctx_bit = BIT(16); } - } else if (IS_GEN_RANGE(dev_priv, 10, 11)) { - dev_priv->perf.ops.is_valid_b_counter_reg = + } else if (IS_GEN_RANGE(i915, 10, 11)) { + perf->oa_formats = gen8_plus_oa_formats; + + perf->ops.is_valid_b_counter_reg = gen7_is_valid_b_counter_addr; - dev_priv->perf.ops.is_valid_mux_reg = + perf->ops.is_valid_mux_reg = gen10_is_valid_mux_addr; - dev_priv->perf.ops.is_valid_flex_reg = + perf->ops.is_valid_flex_reg = gen8_is_valid_flex_addr; - dev_priv->perf.ops.enable_metric_set = gen8_enable_metric_set; - dev_priv->perf.ops.disable_metric_set = gen10_disable_metric_set; + perf->ops.oa_enable = gen8_oa_enable; + perf->ops.oa_disable = gen8_oa_disable; + perf->ops.enable_metric_set = gen8_enable_metric_set; + perf->ops.disable_metric_set = gen10_disable_metric_set; + perf->ops.oa_hw_tail_read = gen8_oa_hw_tail_read; - if (IS_GEN(dev_priv, 10)) { - dev_priv->perf.ctx_oactxctrl_offset = 0x128; - dev_priv->perf.ctx_flexeu0_offset = 0x3de; + if (IS_GEN(i915, 10)) { + perf->ctx_oactxctrl_offset = 0x128; + perf->ctx_flexeu0_offset = 0x3de; } else { - dev_priv->perf.ctx_oactxctrl_offset = 0x124; - dev_priv->perf.ctx_flexeu0_offset = 0x78e; + perf->ctx_oactxctrl_offset = 0x124; + perf->ctx_flexeu0_offset = 0x78e; } - dev_priv->perf.gen8_valid_ctx_bit = BIT(16); + perf->gen8_valid_ctx_bit = BIT(16); + } else if (IS_GEN(i915, 12)) { + perf->oa_formats = gen12_oa_formats; + + perf->ops.is_valid_b_counter_reg = + gen12_is_valid_b_counter_addr; + perf->ops.is_valid_mux_reg = + gen12_is_valid_mux_addr; + perf->ops.is_valid_flex_reg = + gen8_is_valid_flex_addr; + + perf->ops.oa_enable = gen12_oa_enable; + perf->ops.oa_disable = gen12_oa_disable; + perf->ops.enable_metric_set = gen12_enable_metric_set; + 
perf->ops.disable_metric_set = gen12_disable_metric_set; + perf->ops.oa_hw_tail_read = gen12_oa_hw_tail_read; + + perf->ctx_flexeu0_offset = 0; + perf->ctx_oactxctrl_offset = 0x144; } } - if (dev_priv->perf.ops.enable_metric_set) { - INIT_LIST_HEAD(&dev_priv->perf.streams); - mutex_init(&dev_priv->perf.lock); + if (perf->ops.enable_metric_set) { + mutex_init(&perf->lock); oa_sample_rate_hard_limit = 1000 * - (RUNTIME_INFO(dev_priv)->cs_timestamp_frequency_khz / 2); - dev_priv->perf.sysctl_header = register_sysctl_table(dev_root); + (RUNTIME_INFO(i915)->cs_timestamp_frequency_khz / 2); + perf->sysctl_header = register_sysctl_table(dev_root); - mutex_init(&dev_priv->perf.metrics_lock); - idr_init(&dev_priv->perf.metrics_idr); + mutex_init(&perf->metrics_lock); + idr_init(&perf->metrics_idr); /* We set up some ratelimit state to potentially throttle any * _NOTES about spurious, invalid OA reports which we don't @@ -3659,44 +4325,70 @@ void i915_perf_init(struct drm_i915_private *dev_priv) * * Using the same limiting factors as printk_ratelimit() */ - ratelimit_state_init(&dev_priv->perf.spurious_report_rs, - 5 * HZ, 10); + ratelimit_state_init(&perf->spurious_report_rs, 5 * HZ, 10); /* Since we use a DRM_NOTE for spurious reports it would be * inconsistent to let __ratelimit() automatically print a * warning for throttling. */ - ratelimit_set_flags(&dev_priv->perf.spurious_report_rs, + ratelimit_set_flags(&perf->spurious_report_rs, RATELIMIT_MSG_ON_RELEASE); - dev_priv->perf.initialized = true; + atomic64_set(&perf->noa_programming_delay, + 500 * 1000 /* 500us */); + + perf->i915 = i915; } } static int destroy_config(int id, void *p, void *data) { - struct drm_i915_private *dev_priv = data; - struct i915_oa_config *oa_config = p; - - put_oa_config(dev_priv, oa_config); - + i915_oa_config_put(p); return 0; } /** * i915_perf_fini - Counter part to i915_perf_init() - * @dev_priv: i915 device instance + * @i915: i915 device instance */ -void i915_perf_fini(struct drm_i915_private *dev_priv) +void i915_perf_fini(struct drm_i915_private *i915) { - if (!dev_priv->perf.initialized) + struct i915_perf *perf = &i915->perf; + + if (!perf->i915) return; - idr_for_each(&dev_priv->perf.metrics_idr, destroy_config, dev_priv); - idr_destroy(&dev_priv->perf.metrics_idr); + idr_for_each(&perf->metrics_idr, destroy_config, perf); + idr_destroy(&perf->metrics_idr); - unregister_sysctl_table(dev_priv->perf.sysctl_header); + unregister_sysctl_table(perf->sysctl_header); - memset(&dev_priv->perf.ops, 0, sizeof(dev_priv->perf.ops)); + memset(&perf->ops, 0, sizeof(perf->ops)); + perf->i915 = NULL; +} - dev_priv->perf.initialized = false; +/** + * i915_perf_ioctl_version - Version of the i915-perf subsystem + * + * This version number is used by userspace to detect available features. + */ +int i915_perf_ioctl_version(void) +{ + /* + * 1: Initial version + * I915_PERF_IOCTL_ENABLE + * I915_PERF_IOCTL_DISABLE + * + * 2: Added runtime modification of OA config. + * I915_PERF_IOCTL_CONFIG + * + * 3: Add DRM_I915_PERF_PROP_HOLD_PREEMPTION parameter to hold + * preemption on a particular context so that performance data is + * accessible from a delta of MI_RPC reports without looking at the + * OA buffer. 
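/*
 * Hedged userspace-side sketch: the revision returned by
 * i915_perf_ioctl_version() lets userspace gate its use of newer stream
 * features.  How the value reaches userspace (e.g. a getparam-style query)
 * is assumed here rather than taken from this hunk, and the wrapper name is
 * hypothetical.
 */
static void example_detect_perf_features(int drm_fd)
{
	int rev = example_query_perf_interface_revision(drm_fd); /* hypothetical */
	int has_runtime_config  = rev >= 2; /* I915_PERF_IOCTL_CONFIG */
	int has_hold_preemption = rev >= 3; /* DRM_I915_PERF_PROP_HOLD_PREEMPTION */

	(void)has_runtime_config;
	(void)has_hold_preemption;
}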
+ */ + return 3; } + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/i915_perf.c" +#endif diff --git a/drivers/gpu/drm/i915/i915_perf.h b/drivers/gpu/drm/i915/i915_perf.h index a412b16d9ffc..4ceebce72060 100644 --- a/drivers/gpu/drm/i915/i915_perf.h +++ b/drivers/gpu/drm/i915/i915_perf.h @@ -6,11 +6,15 @@ #ifndef __I915_PERF_H__ #define __I915_PERF_H__ +#include <linux/kref.h> #include <linux/types.h> +#include "i915_perf_types.h" + struct drm_device; struct drm_file; struct drm_i915_private; +struct i915_oa_config; struct intel_context; struct intel_engine_cs; @@ -18,6 +22,7 @@ void i915_perf_init(struct drm_i915_private *i915); void i915_perf_fini(struct drm_i915_private *i915); void i915_perf_register(struct drm_i915_private *i915); void i915_perf_unregister(struct drm_i915_private *i915); +int i915_perf_ioctl_version(void); int i915_perf_open_ioctl(struct drm_device *dev, void *data, struct drm_file *file); @@ -25,8 +30,29 @@ int i915_perf_add_config_ioctl(struct drm_device *dev, void *data, struct drm_file *file); int i915_perf_remove_config_ioctl(struct drm_device *dev, void *data, struct drm_file *file); -void i915_oa_init_reg_state(struct intel_engine_cs *engine, - struct intel_context *ce, - u32 *reg_state); + +void i915_oa_init_reg_state(const struct intel_context *ce, + const struct intel_engine_cs *engine); + +struct i915_oa_config * +i915_perf_get_oa_config(struct i915_perf *perf, int metrics_set); + +static inline struct i915_oa_config * +i915_oa_config_get(struct i915_oa_config *oa_config) +{ + if (kref_get_unless_zero(&oa_config->ref)) + return oa_config; + else + return NULL; +} + +void i915_oa_config_release(struct kref *ref); +static inline void i915_oa_config_put(struct i915_oa_config *oa_config) +{ + if (!oa_config) + return; + + kref_put(&oa_config->ref, i915_oa_config_release); +} #endif /* __I915_PERF_H__ */ diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h new file mode 100644 index 000000000000..74ddc20a0d37 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_perf_types.h @@ -0,0 +1,435 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef _I915_PERF_TYPES_H_ +#define _I915_PERF_TYPES_H_ + +#include <linux/atomic.h> +#include <linux/device.h> +#include <linux/hrtimer.h> +#include <linux/llist.h> +#include <linux/poll.h> +#include <linux/sysfs.h> +#include <linux/types.h> +#include <linux/uuid.h> +#include <linux/wait.h> + +#include "i915_reg.h" +#include "intel_wakeref.h" + +struct drm_i915_private; +struct file; +struct i915_gem_context; +struct i915_perf; +struct i915_vma; +struct intel_context; +struct intel_engine_cs; + +struct i915_oa_format { + u32 format; + int size; +}; + +struct i915_oa_reg { + i915_reg_t addr; + u32 value; +}; + +struct i915_oa_config { + struct i915_perf *perf; + + char uuid[UUID_STRING_LEN + 1]; + int id; + + const struct i915_oa_reg *mux_regs; + u32 mux_regs_len; + const struct i915_oa_reg *b_counter_regs; + u32 b_counter_regs_len; + const struct i915_oa_reg *flex_regs; + u32 flex_regs_len; + + struct attribute_group sysfs_metric; + struct attribute *attrs[2]; + struct device_attribute sysfs_metric_id; + + struct kref ref; + struct rcu_head rcu; +}; + +struct i915_perf_stream; + +/** + * struct i915_perf_stream_ops - the OPs to support a specific stream type + */ +struct i915_perf_stream_ops { + /** + * @enable: Enables the collection of HW samples, either in response to + * `I915_PERF_IOCTL_ENABLE` or implicitly called when stream is 
opened + * without `I915_PERF_FLAG_DISABLED`. + */ + void (*enable)(struct i915_perf_stream *stream); + + /** + * @disable: Disables the collection of HW samples, either in response + * to `I915_PERF_IOCTL_DISABLE` or implicitly called before destroying + * the stream. + */ + void (*disable)(struct i915_perf_stream *stream); + + /** + * @poll_wait: Call poll_wait, passing a wait queue that will be woken + * once there is something ready to read() for the stream + */ + void (*poll_wait)(struct i915_perf_stream *stream, + struct file *file, + poll_table *wait); + + /** + * @wait_unlocked: For handling a blocking read, wait until there is + * something to ready to read() for the stream. E.g. wait on the same + * wait queue that would be passed to poll_wait(). + */ + int (*wait_unlocked)(struct i915_perf_stream *stream); + + /** + * @read: Copy buffered metrics as records to userspace + * **buf**: the userspace, destination buffer + * **count**: the number of bytes to copy, requested by userspace + * **offset**: zero at the start of the read, updated as the read + * proceeds, it represents how many bytes have been copied so far and + * the buffer offset for copying the next record. + * + * Copy as many buffered i915 perf samples and records for this stream + * to userspace as will fit in the given buffer. + * + * Only write complete records; returning -%ENOSPC if there isn't room + * for a complete record. + * + * Return any error condition that results in a short read such as + * -%ENOSPC or -%EFAULT, even though these may be squashed before + * returning to userspace. + */ + int (*read)(struct i915_perf_stream *stream, + char __user *buf, + size_t count, + size_t *offset); + + /** + * @destroy: Cleanup any stream specific resources. + * + * The stream will always be disabled before this is called. + */ + void (*destroy)(struct i915_perf_stream *stream); +}; + +/** + * struct i915_perf_stream - state for a single open stream FD + */ +struct i915_perf_stream { + /** + * @perf: i915_perf backpointer + */ + struct i915_perf *perf; + + /** + * @uncore: mmio access path + */ + struct intel_uncore *uncore; + + /** + * @engine: Engine associated with this performance stream. + */ + struct intel_engine_cs *engine; + + /** + * @sample_flags: Flags representing the `DRM_I915_PERF_PROP_SAMPLE_*` + * properties given when opening a stream, representing the contents + * of a single sample as read() by userspace. + */ + u32 sample_flags; + + /** + * @sample_size: Considering the configured contents of a sample + * combined with the required header size, this is the total size + * of a single sample record. + */ + int sample_size; + + /** + * @ctx: %NULL if measuring system-wide across all contexts or a + * specific context that is being monitored. + */ + struct i915_gem_context *ctx; + + /** + * @enabled: Whether the stream is currently enabled, considering + * whether the stream was opened in a disabled state and based + * on `I915_PERF_IOCTL_ENABLE` and `I915_PERF_IOCTL_DISABLE` calls. + */ + bool enabled; + + /** + * @hold_preemption: Whether preemption is put on hold for command + * submissions done on the @ctx. This is useful for some drivers that + * cannot easily post process the OA buffer context to subtract delta + * of performance counters not associated with @ctx. + */ + bool hold_preemption; + + /** + * @ops: The callbacks providing the implementation of this specific + * type of configured stream. 
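/*
 * Hedged sketch (not the driver's implementation) of the read() contract
 * documented above: copy only whole records, advance *offset as bytes are
 * written, and report -ENOSPC once the next record no longer fits.  The
 * example_* record iteration helpers are hypothetical.
 */
static int example_stream_read(struct i915_perf_stream *stream,
			       char __user *buf, size_t count, size_t *offset)
{
	while (example_have_record(stream)) {
		size_t len = example_record_size(stream);

		if (*offset + len > count)
			return -ENOSPC;

		if (copy_to_user(buf + *offset, example_record(stream), len))
			return -EFAULT;

		*offset += len;
		example_record_consumed(stream);
	}

	return 0;
}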
+ */ + const struct i915_perf_stream_ops *ops; + + /** + * @oa_config: The OA configuration used by the stream. + */ + struct i915_oa_config *oa_config; + + /** + * @oa_config_bos: A list of struct i915_oa_config_bo allocated lazily + * each time @oa_config changes. + */ + struct llist_head oa_config_bos; + + /** + * @pinned_ctx: The OA context specific information. + */ + struct intel_context *pinned_ctx; + + /** + * @specific_ctx_id: The id of the specific context. + */ + u32 specific_ctx_id; + + /** + * @specific_ctx_id_mask: The mask used to masking specific_ctx_id bits. + */ + u32 specific_ctx_id_mask; + + /** + * @poll_check_timer: High resolution timer that will periodically + * check for data in the circular OA buffer for notifying userspace + * (e.g. during a read() or poll()). + */ + struct hrtimer poll_check_timer; + + /** + * @poll_wq: The wait queue that hrtimer callback wakes when it + * sees data ready to read in the circular OA buffer. + */ + wait_queue_head_t poll_wq; + + /** + * @pollin: Whether there is data available to read. + */ + bool pollin; + + /** + * @periodic: Whether periodic sampling is currently enabled. + */ + bool periodic; + + /** + * @period_exponent: The OA unit sampling frequency is derived from this. + */ + int period_exponent; + + /** + * @oa_buffer: State of the OA buffer. + */ + struct { + struct i915_vma *vma; + u8 *vaddr; + u32 last_ctx_id; + int format; + int format_size; + int size_exponent; + + /** + * @ptr_lock: Locks reads and writes to all head/tail state + * + * Consider: the head and tail pointer state needs to be read + * consistently from a hrtimer callback (atomic context) and + * read() fop (user context) with tail pointer updates happening + * in atomic context and head updates in user context and the + * (unlikely) possibility of read() errors needing to reset all + * head/tail state. + * + * Note: Contention/performance aren't currently a significant + * concern here considering the relatively low frequency of + * hrtimer callbacks (5ms period) and that reads typically only + * happen in response to a hrtimer event and likely complete + * before the next callback. + * + * Note: This lock is not held *while* reading and copying data + * to userspace so the value of head observed in htrimer + * callbacks won't represent any partial consumption of data. + */ + spinlock_t ptr_lock; + + /** + * @tails: One 'aging' tail pointer and one 'aged' tail pointer ready to + * used for reading. + * + * Initial values of 0xffffffff are invalid and imply that an + * update is required (and should be ignored by an attempted + * read) + */ + struct { + u32 offset; + } tails[2]; + + /** + * @aged_tail_idx: Index for the aged tail ready to read() data up to. + */ + unsigned int aged_tail_idx; + + /** + * @aging_timestamp: A monotonic timestamp for when the current aging tail pointer + * was read; used to determine when it is old enough to trust. + */ + u64 aging_timestamp; + + /** + * @head: Although we can always read back the head pointer register, + * we prefer to avoid trusting the HW state, just to avoid any + * risk that some hardware condition could * somehow bump the + * head pointer unpredictably and cause us to forward the wrong + * OA buffer data to userspace. + */ + u32 head; + } oa_buffer; + + /** + * @noa_wait: A batch buffer doing a wait on the GPU for the NOA logic to be + * reprogrammed. 
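/*
 * Hedged sketch of consuming the aged/aging tail scheme documented above:
 * readers only trust tails[aged_tail_idx], taken under ptr_lock, and an
 * all-ones offset means nothing has aged yet.  Promotion of the aging tail
 * (done from the hrtimer path) is not shown.
 */
static u32 example_read_aged_tail(struct i915_perf_stream *stream)
{
	unsigned long flags;
	u32 aged_tail;

	spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
	aged_tail = stream->oa_buffer.tails[stream->oa_buffer.aged_tail_idx].offset;
	spin_unlock_irqrestore(&stream->oa_buffer.ptr_lock, flags);

	return aged_tail; /* 0xffffffff: no trustworthy data yet */
}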
+ */ + struct i915_vma *noa_wait; +}; + +/** + * struct i915_oa_ops - Gen specific implementation of an OA unit stream + */ +struct i915_oa_ops { + /** + * @is_valid_b_counter_reg: Validates register's address for + * programming boolean counters for a particular platform. + */ + bool (*is_valid_b_counter_reg)(struct i915_perf *perf, u32 addr); + + /** + * @is_valid_mux_reg: Validates register's address for programming mux + * for a particular platform. + */ + bool (*is_valid_mux_reg)(struct i915_perf *perf, u32 addr); + + /** + * @is_valid_flex_reg: Validates register's address for programming + * flex EU filtering for a particular platform. + */ + bool (*is_valid_flex_reg)(struct i915_perf *perf, u32 addr); + + /** + * @enable_metric_set: Selects and applies any MUX configuration to set + * up the Boolean and Custom (B/C) counters that are part of the + * counter reports being sampled. May apply system constraints such as + * disabling EU clock gating as required. + */ + int (*enable_metric_set)(struct i915_perf_stream *stream); + + /** + * @disable_metric_set: Remove system constraints associated with using + * the OA unit. + */ + void (*disable_metric_set)(struct i915_perf_stream *stream); + + /** + * @oa_enable: Enable periodic sampling + */ + void (*oa_enable)(struct i915_perf_stream *stream); + + /** + * @oa_disable: Disable periodic sampling + */ + void (*oa_disable)(struct i915_perf_stream *stream); + + /** + * @read: Copy data from the circular OA buffer into a given userspace + * buffer. + */ + int (*read)(struct i915_perf_stream *stream, + char __user *buf, + size_t count, + size_t *offset); + + /** + * @oa_hw_tail_read: read the OA tail pointer register + * + * In particular this enables us to share all the fiddly code for + * handling the OA unit tail pointer race that affects multiple + * generations. + */ + u32 (*oa_hw_tail_read)(struct i915_perf_stream *stream); +}; + +struct i915_perf { + struct drm_i915_private *i915; + + struct kobject *metrics_kobj; + struct ctl_table_header *sysctl_header; + + /* + * Lock associated with adding/modifying/removing OA configs + * in perf->metrics_idr. + */ + struct mutex metrics_lock; + + /* + * List of dynamic configurations (struct i915_oa_config), you + * need to hold perf->metrics_lock to access it. + */ + struct idr metrics_idr; + + /* + * Lock associated with anything below within this structure + * except exclusive_stream. + */ + struct mutex lock; + + /* + * The stream currently using the OA unit. If accessed + * outside a syscall associated to its file + * descriptor. 
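/*
 * Hedged sketch, not necessarily how the driver implements it: a lookup in
 * the shape of i915_perf_get_oa_config(), assembled from the pieces
 * documented above.  metrics_idr is walked under metrics_lock and a
 * reference is taken with i915_oa_config_get(), which returns NULL if the
 * config is already being torn down.
 */
static struct i915_oa_config *
example_get_oa_config(struct i915_perf *perf, int metrics_set)
{
	struct i915_oa_config *oa_config;

	mutex_lock(&perf->metrics_lock);
	oa_config = idr_find(&perf->metrics_idr, metrics_set);
	if (oa_config)
		oa_config = i915_oa_config_get(oa_config);
	mutex_unlock(&perf->metrics_lock);

	return oa_config;
}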
+ */ + struct i915_perf_stream *exclusive_stream; + + /** + * For rate limiting any notifications of spurious + * invalid OA reports + */ + struct ratelimit_state spurious_report_rs; + + struct i915_oa_config test_config; + + u32 gen7_latched_oastatus1; + u32 ctx_oactxctrl_offset; + u32 ctx_flexeu0_offset; + + /** + * The RPT_ID/reason field for Gen8+ includes a bit + * to determine if the CTX ID in the report is valid + * but the specific bit differs between Gen 8 and 9 + */ + u32 gen8_valid_ctx_bit; + + struct i915_oa_ops ops; + const struct i915_oa_format *oa_formats; + + atomic64_t noa_programming_delay; +}; + +#endif /* _I915_PERF_TYPES_H_ */ diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 212acaef581e..0d40dccd1409 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -11,6 +11,8 @@ #include "gt/intel_engine_pm.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt_pm.h" +#include "gt/intel_rc6.h" +#include "gt/intel_rps.h" #include "i915_drv.h" #include "i915_pmu.h" @@ -116,22 +118,124 @@ static bool pmu_needs_timer(struct i915_pmu *pmu, bool gpu_active) return enable; } -void i915_pmu_gt_parked(struct drm_i915_private *i915) +static u64 __get_rc6(struct intel_gt *gt) { - struct i915_pmu *pmu = &i915->pmu; + struct drm_i915_private *i915 = gt->i915; + u64 val; - if (!pmu->base.event_init) - return; + val = intel_rc6_residency_ns(>->rc6, + IS_VALLEYVIEW(i915) ? + VLV_GT_RENDER_RC6 : + GEN6_GT_GFX_RC6); + + if (HAS_RC6p(i915)) + val += intel_rc6_residency_ns(>->rc6, GEN6_GT_GFX_RC6p); + + if (HAS_RC6pp(i915)) + val += intel_rc6_residency_ns(>->rc6, GEN6_GT_GFX_RC6pp); + + return val; +} + +#if IS_ENABLED(CONFIG_PM) + +static inline s64 ktime_since(const ktime_t kt) +{ + return ktime_to_ns(ktime_sub(ktime_get(), kt)); +} + +static u64 __pmu_estimate_rc6(struct i915_pmu *pmu) +{ + u64 val; - spin_lock_irq(&pmu->lock); /* - * Signal sampling timer to stop if only engine events are enabled and - * GPU went idle. + * We think we are runtime suspended. + * + * Report the delta from when the device was suspended to now, + * on top of the last known real value, as the approximated RC6 + * counter value. */ - pmu->timer_enabled = pmu_needs_timer(pmu, false); - spin_unlock_irq(&pmu->lock); + val = ktime_since(pmu->sleep_last); + val += pmu->sample[__I915_SAMPLE_RC6].cur; + + pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + + return val; } +static u64 __pmu_update_rc6(struct i915_pmu *pmu, u64 val) +{ + /* + * If we are coming back from being runtime suspended we must + * be careful not to report a larger value than returned + * previously. 
+ */ + if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { + pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; + pmu->sample[__I915_SAMPLE_RC6].cur = val; + } else { + val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur; + } + + return val; +} + +static u64 get_rc6(struct intel_gt *gt) +{ + struct drm_i915_private *i915 = gt->i915; + struct i915_pmu *pmu = &i915->pmu; + unsigned long flags; + u64 val; + + val = 0; + if (intel_gt_pm_get_if_awake(gt)) { + val = __get_rc6(gt); + intel_gt_pm_put(gt); + } + + spin_lock_irqsave(&pmu->lock, flags); + + if (val) + val = __pmu_update_rc6(pmu, val); + else + val = __pmu_estimate_rc6(pmu); + + spin_unlock_irqrestore(&pmu->lock, flags); + + return val; +} + +static void park_rc6(struct drm_i915_private *i915) +{ + struct i915_pmu *pmu = &i915->pmu; + + if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY)) + __pmu_update_rc6(pmu, __get_rc6(&i915->gt)); + + pmu->sleep_last = ktime_get(); +} + +static void unpark_rc6(struct drm_i915_private *i915) +{ + struct i915_pmu *pmu = &i915->pmu; + + /* Estimate how long we slept and accumulate that into rc6 counters */ + if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY)) + __pmu_estimate_rc6(pmu); +} + +#else + +static u64 get_rc6(struct intel_gt *gt) +{ + return __get_rc6(gt); +} + +static void park_rc6(struct drm_i915_private *i915) {} +static void unpark_rc6(struct drm_i915_private *i915) {} + +#endif + static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) { if (!pmu->timer_enabled && pmu_needs_timer(pmu, true)) { @@ -143,6 +247,26 @@ static void __i915_pmu_maybe_start_timer(struct i915_pmu *pmu) } } +void i915_pmu_gt_parked(struct drm_i915_private *i915) +{ + struct i915_pmu *pmu = &i915->pmu; + + if (!pmu->base.event_init) + return; + + spin_lock_irq(&pmu->lock); + + park_rc6(i915); + + /* + * Signal sampling timer to stop if only engine events are enabled and + * GPU went idle. + */ + pmu->timer_enabled = pmu_needs_timer(pmu, false); + + spin_unlock_irq(&pmu->lock); +} + void i915_pmu_gt_unparked(struct drm_i915_private *i915) { struct i915_pmu *pmu = &i915->pmu; @@ -151,10 +275,14 @@ void i915_pmu_gt_unparked(struct drm_i915_private *i915) return; spin_lock_irq(&pmu->lock); + /* * Re-enable sampling timer when GPU goes active. */ __i915_pmu_maybe_start_timer(pmu); + + unpark_rc6(i915); + spin_unlock_irq(&pmu->lock); } @@ -174,7 +302,7 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns) if ((i915->pmu.enable & ENGINE_SAMPLE_MASK) == 0) return; - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct intel_engine_pmu *pmu = &engine->pmu; unsigned long flags; bool busy; @@ -194,6 +322,10 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns) if (val & RING_WAIT_SEMAPHORE) add_sample(&pmu->sample[I915_SAMPLE_SEMA], period_ns); + /* No need to sample when busy stats are supported. */ + if (intel_engine_supports_stats(engine)) + goto skip; + /* * While waiting on a semaphore or event, MI_MODE reports the * ring as idle. 
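/*
 * Hedged userspace sketch: reading the RC6 residency counter maintained
 * above through perf_event_open(2).  The dynamic PMU type is assumed to be
 * discovered via sysfs (e.g. /sys/bus/event_source/devices/i915/type); with
 * the naming change later in this patch, non-integrated devices would use
 * an "i915-<pci-device>" directory instead.  i915 PMU events are
 * system-wide, hence pid == -1 and an explicit cpu.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

static int example_open_rc6_counter(unsigned int pmu_type,
				    unsigned long long config)
{
	struct perf_event_attr attr = {
		.type   = pmu_type,		/* read from sysfs */
		.size   = sizeof(attr),
		.config = config,		/* e.g. I915_PMU_RC6_RESIDENCY */
	};

	return syscall(__NR_perf_event_open, &attr,
		       -1 /* pid */, 0 /* cpu */, -1 /* group fd */, 0 /* flags */);
}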
However, previously using the seqno, and with @@ -227,25 +359,26 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns) struct drm_i915_private *i915 = gt->i915; struct intel_uncore *uncore = gt->uncore; struct i915_pmu *pmu = &i915->pmu; + struct intel_rps *rps = >->rps; if (pmu->enable & config_enabled_mask(I915_PMU_ACTUAL_FREQUENCY)) { u32 val; - val = i915->gt_pm.rps.cur_freq; + val = rps->cur_freq; if (intel_gt_pm_get_if_awake(gt)) { val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1); - val = intel_get_cagf(i915, val); + val = intel_get_cagf(rps, val); intel_gt_pm_put(gt); } add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT], - intel_gpu_freq(i915, val), + intel_gpu_freq(rps, val), period_ns / 1000); } if (pmu->enable & config_enabled_mask(I915_PMU_REQUESTED_FREQUENCY)) { add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_REQ], - intel_gpu_freq(i915, i915->gt_pm.rps.cur_freq), + intel_gpu_freq(rps, rps->cur_freq), period_ns / 1000); } } @@ -426,104 +559,6 @@ static int i915_pmu_event_init(struct perf_event *event) return 0; } -static u64 __get_rc6(struct intel_gt *gt) -{ - struct drm_i915_private *i915 = gt->i915; - u64 val; - - val = intel_rc6_residency_ns(i915, - IS_VALLEYVIEW(i915) ? - VLV_GT_RENDER_RC6 : - GEN6_GT_GFX_RC6); - - if (HAS_RC6p(i915)) - val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p); - - if (HAS_RC6pp(i915)) - val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp); - - return val; -} - -static u64 get_rc6(struct intel_gt *gt) -{ -#if IS_ENABLED(CONFIG_PM) - struct drm_i915_private *i915 = gt->i915; - struct intel_runtime_pm *rpm = &i915->runtime_pm; - struct i915_pmu *pmu = &i915->pmu; - intel_wakeref_t wakeref; - unsigned long flags; - u64 val; - - wakeref = intel_runtime_pm_get_if_in_use(rpm); - if (wakeref) { - val = __get_rc6(gt); - intel_runtime_pm_put(rpm, wakeref); - - /* - * If we are coming back from being runtime suspended we must - * be careful not to report a larger value than returned - * previously. - */ - - spin_lock_irqsave(&pmu->lock, flags); - - if (val >= pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { - pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; - pmu->sample[__I915_SAMPLE_RC6].cur = val; - } else { - val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur; - } - - spin_unlock_irqrestore(&pmu->lock, flags); - } else { - struct device *kdev = rpm->kdev; - - /* - * We are runtime suspended. - * - * Report the delta from when the device was suspended to now, - * on top of the last known real value, as the approximated RC6 - * counter value. - */ - spin_lock_irqsave(&pmu->lock, flags); - - /* - * After the above branch intel_runtime_pm_get_if_in_use failed - * to get the runtime PM reference we cannot assume we are in - * runtime suspend since we can either: a) race with coming out - * of it before we took the power.lock, or b) there are other - * states than suspended which can bring us here. - * - * We need to double-check that we are indeed currently runtime - * suspended and if not we cannot do better than report the last - * known RC6 value. 
- */ - if (pm_runtime_status_suspended(kdev)) { - val = pm_runtime_suspended_time(kdev); - - if (!pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) - pmu->suspended_time_last = val; - - val -= pmu->suspended_time_last; - val += pmu->sample[__I915_SAMPLE_RC6].cur; - - pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; - } else if (pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { - val = pmu->sample[__I915_SAMPLE_RC6_ESTIMATED].cur; - } else { - val = pmu->sample[__I915_SAMPLE_RC6].cur; - } - - spin_unlock_irqrestore(&pmu->lock, flags); - } - - return val; -#else - return __get_rc6(gt); -#endif -} - static u64 __i915_pmu_event_read(struct perf_event *event) { struct drm_i915_private *i915 = @@ -1047,21 +1082,43 @@ static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu) cpuhp_remove_multi_state(cpuhp_slot); } +static bool is_igp(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = i915->drm.pdev; + + /* IGP is 0000:00:02.0 */ + return pci_domain_nr(pdev->bus) == 0 && + pdev->bus->number == 0 && + PCI_SLOT(pdev->devfn) == 2 && + PCI_FUNC(pdev->devfn) == 0; +} + void i915_pmu_register(struct drm_i915_private *i915) { struct i915_pmu *pmu = &i915->pmu; - int ret; + int ret = -ENOMEM; if (INTEL_GEN(i915) <= 2) { dev_info(i915->drm.dev, "PMU not supported for this GPU."); return; } - i915_pmu_events_attr_group.attrs = create_event_attributes(pmu); - if (!i915_pmu_events_attr_group.attrs) { - ret = -ENOMEM; + spin_lock_init(&pmu->lock); + hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + pmu->timer.function = i915_sample; + + if (!is_igp(i915)) + pmu->name = kasprintf(GFP_KERNEL, + "i915-%s", + dev_name(i915->drm.dev)); + else + pmu->name = "i915"; + if (!pmu->name) goto err; - } + + i915_pmu_events_attr_group.attrs = create_event_attributes(pmu); + if (!i915_pmu_events_attr_group.attrs) + goto err_name; pmu->base.attr_groups = i915_pmu_attr_groups; pmu->base.task_ctx_nr = perf_invalid_context; @@ -1073,13 +1130,9 @@ void i915_pmu_register(struct drm_i915_private *i915) pmu->base.read = i915_pmu_event_read; pmu->base.event_idx = i915_pmu_event_event_idx; - spin_lock_init(&pmu->lock); - hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - pmu->timer.function = i915_sample; - - ret = perf_pmu_register(&pmu->base, "i915", -1); + ret = perf_pmu_register(&pmu->base, pmu->name, -1); if (ret) - goto err; + goto err_attr; ret = i915_pmu_register_cpuhp_state(pmu); if (ret) @@ -1089,10 +1142,14 @@ void i915_pmu_register(struct drm_i915_private *i915) err_unreg: perf_pmu_unregister(&pmu->base); -err: +err_attr: pmu->base.event_init = NULL; free_event_attributes(pmu); - DRM_NOTE("Failed to register PMU! (err=%d)\n", ret); +err_name: + if (!is_igp(i915)) + kfree(pmu->name); +err: + dev_notice(i915->drm.dev, "Failed to register PMU!\n"); } void i915_pmu_unregister(struct drm_i915_private *i915) @@ -1110,5 +1167,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915) perf_pmu_unregister(&pmu->base); pmu->base.event_init = NULL; + if (!is_igp(i915)) + kfree(pmu->name); free_event_attributes(pmu); } diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h index 4fc4f2478301..bf52e3983631 100644 --- a/drivers/gpu/drm/i915/i915_pmu.h +++ b/drivers/gpu/drm/i915/i915_pmu.h @@ -47,6 +47,10 @@ struct i915_pmu { */ struct pmu base; /** + * @name: Name as registered with perf core. + */ + const char *name; + /** * @lock: Lock protecting enable mask and ref count handling. 
*/ spinlock_t lock; @@ -97,9 +101,9 @@ struct i915_pmu { */ struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; /** - * @suspended_time_last: Cached suspend time from PM core. + * @sleep_last: Last time GT parked for RC6 estimation. */ - u64 suspended_time_last; + ktime_t sleep_last; /** * @i915_attr: Memory block holding device attributes. */ diff --git a/drivers/gpu/drm/i915/i915_priolist_types.h b/drivers/gpu/drm/i915/i915_priolist_types.h index 21037a2e2038..732aad148881 100644 --- a/drivers/gpu/drm/i915/i915_priolist_types.h +++ b/drivers/gpu/drm/i915/i915_priolist_types.h @@ -16,6 +16,12 @@ enum { I915_PRIORITY_MIN = I915_CONTEXT_MIN_USER_PRIORITY - 1, I915_PRIORITY_NORMAL = I915_CONTEXT_DEFAULT_PRIORITY, I915_PRIORITY_MAX = I915_CONTEXT_MAX_USER_PRIORITY + 1, + + /* A preemptive pulse used to monitor the health of each engine */ + I915_PRIORITY_HEARTBEAT, + + /* Interactive workload, scheduled for immediate pageflipping */ + I915_PRIORITY_DISPLAY, }; #define I915_USER_PRIORITY_SHIFT 2 @@ -39,6 +45,7 @@ enum { * active request. */ #define I915_PRIORITY_UNPREEMPTABLE INT_MAX +#define I915_PRIORITY_BARRIER INT_MAX #define __NO_PREEMPTION (I915_PRIORITY_WAIT) diff --git a/drivers/gpu/drm/i915/i915_query.c b/drivers/gpu/drm/i915/i915_query.c index ad9240a0817a..c27cfef9281c 100644 --- a/drivers/gpu/drm/i915/i915_query.c +++ b/drivers/gpu/drm/i915/i915_query.c @@ -7,6 +7,7 @@ #include <linux/nospec.h> #include "i915_drv.h" +#include "i915_perf.h" #include "i915_query.h" #include <uapi/drm/i915_drm.h> @@ -37,8 +38,6 @@ static int query_topology_info(struct drm_i915_private *dev_priv, const struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; struct drm_i915_query_topology_info topo; u32 slice_length, subslice_length, eu_length, total_length; - u8 subslice_stride = GEN_SSEU_STRIDE(sseu->max_subslices); - u8 eu_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); int ret; if (query_item->flags != 0) @@ -50,8 +49,8 @@ static int query_topology_info(struct drm_i915_private *dev_priv, BUILD_BUG_ON(sizeof(u8) != sizeof(sseu->slice_mask)); slice_length = sizeof(sseu->slice_mask); - subslice_length = sseu->max_slices * subslice_stride; - eu_length = sseu->max_slices * sseu->max_subslices * eu_stride; + subslice_length = sseu->max_slices * sseu->ss_stride; + eu_length = sseu->max_slices * sseu->max_subslices * sseu->eu_stride; total_length = sizeof(topo) + slice_length + subslice_length + eu_length; @@ -69,9 +68,9 @@ static int query_topology_info(struct drm_i915_private *dev_priv, topo.max_eus_per_subslice = sseu->max_eus_per_subslice; topo.subslice_offset = slice_length; - topo.subslice_stride = subslice_stride; + topo.subslice_stride = sseu->ss_stride; topo.eu_offset = slice_length + subslice_length; - topo.eu_stride = eu_stride; + topo.eu_stride = sseu->eu_stride; if (__copy_to_user(u64_to_user_ptr(query_item->data_ptr), &topo, sizeof(topo))) @@ -142,10 +141,305 @@ query_engine_info(struct drm_i915_private *i915, return len; } +static int can_copy_perf_config_registers_or_number(u32 user_n_regs, + u64 user_regs_ptr, + u32 kernel_n_regs) +{ + /* + * We'll just put the number of registers, and won't copy the + * register. 
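/*
 * Hedged userspace sketch of walking the topology blob whose offsets and
 * strides are filled in above: one subslice mask of subslice_stride bytes
 * per slice, starting at subslice_offset into the data that follows
 * struct drm_i915_query_topology_info.
 */
static int example_subslice_enabled(const struct drm_i915_query_topology_info *topo,
				    const unsigned char *data,
				    int slice, int subslice)
{
	const unsigned char *mask = data + topo->subslice_offset +
				    slice * topo->subslice_stride;

	return (mask[subslice / 8] >> (subslice % 8)) & 1;
}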
+ */ + if (user_n_regs == 0) + return 0; + + if (user_n_regs < kernel_n_regs) + return -EINVAL; + + if (!access_ok(u64_to_user_ptr(user_regs_ptr), + 2 * sizeof(u32) * kernel_n_regs)) + return -EFAULT; + + return 0; +} + +static int copy_perf_config_registers_or_number(const struct i915_oa_reg *kernel_regs, + u32 kernel_n_regs, + u64 user_regs_ptr, + u32 *user_n_regs) +{ + u32 r; + + if (*user_n_regs == 0) { + *user_n_regs = kernel_n_regs; + return 0; + } + + *user_n_regs = kernel_n_regs; + + for (r = 0; r < kernel_n_regs; r++) { + u32 __user *user_reg_ptr = + u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2); + u32 __user *user_val_ptr = + u64_to_user_ptr(user_regs_ptr + sizeof(u32) * r * 2 + + sizeof(u32)); + int ret; + + ret = __put_user(i915_mmio_reg_offset(kernel_regs[r].addr), + user_reg_ptr); + if (ret) + return -EFAULT; + + ret = __put_user(kernel_regs[r].value, user_val_ptr); + if (ret) + return -EFAULT; + } + + return 0; +} + +static int query_perf_config_data(struct drm_i915_private *i915, + struct drm_i915_query_item *query_item, + bool use_uuid) +{ + struct drm_i915_query_perf_config __user *user_query_config_ptr = + u64_to_user_ptr(query_item->data_ptr); + struct drm_i915_perf_oa_config __user *user_config_ptr = + u64_to_user_ptr(query_item->data_ptr + + sizeof(struct drm_i915_query_perf_config)); + struct drm_i915_perf_oa_config user_config; + struct i915_perf *perf = &i915->perf; + struct i915_oa_config *oa_config; + char uuid[UUID_STRING_LEN + 1]; + u64 config_id; + u32 flags, total_size; + int ret; + + if (!perf->i915) + return -ENODEV; + + total_size = + sizeof(struct drm_i915_query_perf_config) + + sizeof(struct drm_i915_perf_oa_config); + + if (query_item->length == 0) + return total_size; + + if (query_item->length < total_size) { + DRM_DEBUG("Invalid query config data item size=%u expected=%u\n", + query_item->length, total_size); + return -EINVAL; + } + + if (!access_ok(user_query_config_ptr, total_size)) + return -EFAULT; + + if (__get_user(flags, &user_query_config_ptr->flags)) + return -EFAULT; + + if (flags != 0) + return -EINVAL; + + if (use_uuid) { + struct i915_oa_config *tmp; + int id; + + BUILD_BUG_ON(sizeof(user_query_config_ptr->uuid) >= sizeof(uuid)); + + memset(&uuid, 0, sizeof(uuid)); + if (__copy_from_user(uuid, user_query_config_ptr->uuid, + sizeof(user_query_config_ptr->uuid))) + return -EFAULT; + + oa_config = NULL; + rcu_read_lock(); + idr_for_each_entry(&perf->metrics_idr, tmp, id) { + if (!strcmp(tmp->uuid, uuid)) { + oa_config = i915_oa_config_get(tmp); + break; + } + } + rcu_read_unlock(); + } else { + if (__get_user(config_id, &user_query_config_ptr->config)) + return -EFAULT; + + oa_config = i915_perf_get_oa_config(perf, config_id); + } + if (!oa_config) + return -ENOENT; + + if (__copy_from_user(&user_config, user_config_ptr, + sizeof(user_config))) { + ret = -EFAULT; + goto out; + } + + ret = can_copy_perf_config_registers_or_number(user_config.n_boolean_regs, + user_config.boolean_regs_ptr, + oa_config->b_counter_regs_len); + if (ret) + goto out; + + ret = can_copy_perf_config_registers_or_number(user_config.n_flex_regs, + user_config.flex_regs_ptr, + oa_config->flex_regs_len); + if (ret) + goto out; + + ret = can_copy_perf_config_registers_or_number(user_config.n_mux_regs, + user_config.mux_regs_ptr, + oa_config->mux_regs_len); + if (ret) + goto out; + + ret = copy_perf_config_registers_or_number(oa_config->b_counter_regs, + oa_config->b_counter_regs_len, + user_config.boolean_regs_ptr, + &user_config.n_boolean_regs); + if (ret) + goto 
out; + + ret = copy_perf_config_registers_or_number(oa_config->flex_regs, + oa_config->flex_regs_len, + user_config.flex_regs_ptr, + &user_config.n_flex_regs); + if (ret) + goto out; + + ret = copy_perf_config_registers_or_number(oa_config->mux_regs, + oa_config->mux_regs_len, + user_config.mux_regs_ptr, + &user_config.n_mux_regs); + if (ret) + goto out; + + memcpy(user_config.uuid, oa_config->uuid, sizeof(user_config.uuid)); + + if (__copy_to_user(user_config_ptr, &user_config, + sizeof(user_config))) { + ret = -EFAULT; + goto out; + } + + ret = total_size; + +out: + i915_oa_config_put(oa_config); + return ret; +} + +static size_t sizeof_perf_config_list(size_t count) +{ + return sizeof(struct drm_i915_query_perf_config) + sizeof(u64) * count; +} + +static size_t sizeof_perf_metrics(struct i915_perf *perf) +{ + struct i915_oa_config *tmp; + size_t i; + int id; + + i = 1; + rcu_read_lock(); + idr_for_each_entry(&perf->metrics_idr, tmp, id) + i++; + rcu_read_unlock(); + + return sizeof_perf_config_list(i); +} + +static int query_perf_config_list(struct drm_i915_private *i915, + struct drm_i915_query_item *query_item) +{ + struct drm_i915_query_perf_config __user *user_query_config_ptr = + u64_to_user_ptr(query_item->data_ptr); + struct i915_perf *perf = &i915->perf; + u64 *oa_config_ids = NULL; + int alloc, n_configs; + u32 flags; + int ret; + + if (!perf->i915) + return -ENODEV; + + if (query_item->length == 0) + return sizeof_perf_metrics(perf); + + if (get_user(flags, &user_query_config_ptr->flags)) + return -EFAULT; + + if (flags != 0) + return -EINVAL; + + n_configs = 1; + do { + struct i915_oa_config *tmp; + u64 *ids; + int id; + + ids = krealloc(oa_config_ids, + n_configs * sizeof(*oa_config_ids), + GFP_KERNEL); + if (!ids) + return -ENOMEM; + + alloc = fetch_and_zero(&n_configs); + + ids[n_configs++] = 1ull; /* reserved for test_config */ + rcu_read_lock(); + idr_for_each_entry(&perf->metrics_idr, tmp, id) { + if (n_configs < alloc) + ids[n_configs] = id; + n_configs++; + } + rcu_read_unlock(); + + oa_config_ids = ids; + } while (n_configs > alloc); + + if (query_item->length < sizeof_perf_config_list(n_configs)) { + DRM_DEBUG("Invalid query config list item size=%u expected=%zu\n", + query_item->length, + sizeof_perf_config_list(n_configs)); + kfree(oa_config_ids); + return -EINVAL; + } + + if (put_user(n_configs, &user_query_config_ptr->config)) { + kfree(oa_config_ids); + return -EFAULT; + } + + ret = copy_to_user(user_query_config_ptr + 1, + oa_config_ids, + n_configs * sizeof(*oa_config_ids)); + kfree(oa_config_ids); + if (ret) + return -EFAULT; + + return sizeof_perf_config_list(n_configs); +} + +static int query_perf_config(struct drm_i915_private *i915, + struct drm_i915_query_item *query_item) +{ + switch (query_item->flags) { + case DRM_I915_QUERY_PERF_CONFIG_LIST: + return query_perf_config_list(i915, query_item); + case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID: + return query_perf_config_data(i915, query_item, true); + case DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID: + return query_perf_config_data(i915, query_item, false); + default: + return -EINVAL; + } +} + static int (* const i915_query_funcs[])(struct drm_i915_private *dev_priv, struct drm_i915_query_item *query_item) = { query_topology_info, query_engine_info, + query_perf_config, }; int i915_query_ioctl(struct drm_device *dev, void *data, struct drm_file *file) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f8ee9aba3955..73079b503724 100644 --- a/drivers/gpu/drm/i915/i915_reg.h 
+++ b/drivers/gpu/drm/i915/i915_reg.h @@ -413,6 +413,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN11_VECS_SFC_USAGE(engine) _MMIO((engine)->mmio_base + 0x2014) #define GEN11_VECS_SFC_USAGE_BIT (1 << 0) +#define GEN12_SFC_DONE(n) _MMIO(0x1cc00 + (n) * 0x100) +#define GEN12_SFC_DONE_MAX 4 + #define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228) #define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518) #define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220) @@ -547,7 +550,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MI_PREDICATE_SRC0_UDW _MMIO(0x2400 + 4) #define MI_PREDICATE_SRC1 _MMIO(0x2408) #define MI_PREDICATE_SRC1_UDW _MMIO(0x2408 + 4) - +#define MI_PREDICATE_DATA _MMIO(0x2410) +#define MI_PREDICATE_RESULT _MMIO(0x2418) +#define MI_PREDICATE_RESULT_1 _MMIO(0x241c) #define MI_PREDICATE_RESULT_2 _MMIO(0x2214) #define LOWER_SLICE_ENABLED (1 << 0) #define LOWER_SLICE_DISABLED (0 << 0) @@ -688,6 +693,45 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OABUFFER_SIZE_8M (6 << 3) #define OABUFFER_SIZE_16M (7 << 3) +/* Gen12 OAR unit */ +#define GEN12_OAR_OACONTROL _MMIO(0x2960) +#define GEN12_OAR_OACONTROL_COUNTER_FORMAT_SHIFT 1 +#define GEN12_OAR_OACONTROL_COUNTER_ENABLE (1 << 0) + +#define GEN12_OACTXCONTROL _MMIO(0x2360) +#define GEN12_OAR_OASTATUS _MMIO(0x2968) + +/* Gen12 OAG unit */ +#define GEN12_OAG_OAHEADPTR _MMIO(0xdb00) +#define GEN12_OAG_OAHEADPTR_MASK 0xffffffc0 +#define GEN12_OAG_OATAILPTR _MMIO(0xdb04) +#define GEN12_OAG_OATAILPTR_MASK 0xffffffc0 + +#define GEN12_OAG_OABUFFER _MMIO(0xdb08) +#define GEN12_OAG_OABUFFER_BUFFER_SIZE_MASK (0x7) +#define GEN12_OAG_OABUFFER_BUFFER_SIZE_SHIFT (3) +#define GEN12_OAG_OABUFFER_MEMORY_SELECT (1 << 0) /* 0: PPGTT, 1: GGTT */ + +#define GEN12_OAG_OAGLBCTXCTRL _MMIO(0x2b28) +#define GEN12_OAG_OAGLBCTXCTRL_TIMER_PERIOD_SHIFT 2 +#define GEN12_OAG_OAGLBCTXCTRL_TIMER_ENABLE (1 << 1) +#define GEN12_OAG_OAGLBCTXCTRL_COUNTER_RESUME (1 << 0) + +#define GEN12_OAG_OACONTROL _MMIO(0xdaf4) +#define GEN12_OAG_OACONTROL_OA_COUNTER_FORMAT_SHIFT 2 +#define GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE (1 << 0) + +#define GEN12_OAG_OA_DEBUG _MMIO(0xdaf8) +#define GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO (1 << 6) +#define GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS (1 << 5) +#define GEN12_OAG_OA_DEBUG_DISABLE_GO_1_0_REPORTS (1 << 2) +#define GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS (1 << 1) + +#define GEN12_OAG_OASTATUS _MMIO(0xdafc) +#define GEN12_OAG_OASTATUS_COUNTER_OVERFLOW (1 << 2) +#define GEN12_OAG_OASTATUS_BUFFER_OVERFLOW (1 << 1) +#define GEN12_OAG_OASTATUS_REPORT_LOST (1 << 0) + /* * Flexible, Aggregate EU Counter Registers. 
* Note: these aren't contiguous @@ -924,6 +968,26 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OAREPORTTRIG8_NOA_SELECT_6_SHIFT 24 #define OAREPORTTRIG8_NOA_SELECT_7_SHIFT 28 +/* Same layout as OASTARTTRIGX */ +#define GEN12_OAG_OASTARTTRIG1 _MMIO(0xd900) +#define GEN12_OAG_OASTARTTRIG2 _MMIO(0xd904) +#define GEN12_OAG_OASTARTTRIG3 _MMIO(0xd908) +#define GEN12_OAG_OASTARTTRIG4 _MMIO(0xd90c) +#define GEN12_OAG_OASTARTTRIG5 _MMIO(0xd910) +#define GEN12_OAG_OASTARTTRIG6 _MMIO(0xd914) +#define GEN12_OAG_OASTARTTRIG7 _MMIO(0xd918) +#define GEN12_OAG_OASTARTTRIG8 _MMIO(0xd91c) + +/* Same layout as OAREPORTTRIGX */ +#define GEN12_OAG_OAREPORTTRIG1 _MMIO(0xd920) +#define GEN12_OAG_OAREPORTTRIG2 _MMIO(0xd924) +#define GEN12_OAG_OAREPORTTRIG3 _MMIO(0xd928) +#define GEN12_OAG_OAREPORTTRIG4 _MMIO(0xd92c) +#define GEN12_OAG_OAREPORTTRIG5 _MMIO(0xd930) +#define GEN12_OAG_OAREPORTTRIG6 _MMIO(0xd934) +#define GEN12_OAG_OAREPORTTRIG7 _MMIO(0xd938) +#define GEN12_OAG_OAREPORTTRIG8 _MMIO(0xd93c) + /* CECX_0 */ #define OACEC_COMPARE_LESS_OR_EQUAL 6 #define OACEC_COMPARE_NOT_EQUAL 5 @@ -940,6 +1004,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OACEC_SELECT_PREV (1 << 19) #define OACEC_SELECT_BOOLEAN (2 << 19) +/* 11-bit array 0: pass-through, 1: negated */ +#define GEN12_OASCEC_NEGATE_MASK 0x7ff +#define GEN12_OASCEC_NEGATE_SHIFT 21 + /* CECX_1 */ #define OACEC_MASK_MASK 0xffff #define OACEC_CONSIDERATIONS_MASK 0xffff @@ -962,6 +1030,42 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define OACEC7_0 _MMIO(0x27a8) #define OACEC7_1 _MMIO(0x27ac) +/* Same layout as CECX_Y */ +#define GEN12_OAG_CEC0_0 _MMIO(0xd940) +#define GEN12_OAG_CEC0_1 _MMIO(0xd944) +#define GEN12_OAG_CEC1_0 _MMIO(0xd948) +#define GEN12_OAG_CEC1_1 _MMIO(0xd94c) +#define GEN12_OAG_CEC2_0 _MMIO(0xd950) +#define GEN12_OAG_CEC2_1 _MMIO(0xd954) +#define GEN12_OAG_CEC3_0 _MMIO(0xd958) +#define GEN12_OAG_CEC3_1 _MMIO(0xd95c) +#define GEN12_OAG_CEC4_0 _MMIO(0xd960) +#define GEN12_OAG_CEC4_1 _MMIO(0xd964) +#define GEN12_OAG_CEC5_0 _MMIO(0xd968) +#define GEN12_OAG_CEC5_1 _MMIO(0xd96c) +#define GEN12_OAG_CEC6_0 _MMIO(0xd970) +#define GEN12_OAG_CEC6_1 _MMIO(0xd974) +#define GEN12_OAG_CEC7_0 _MMIO(0xd978) +#define GEN12_OAG_CEC7_1 _MMIO(0xd97c) + +/* Same layout as CECX_Y + negate 11-bit array */ +#define GEN12_OAG_SCEC0_0 _MMIO(0xdc00) +#define GEN12_OAG_SCEC0_1 _MMIO(0xdc04) +#define GEN12_OAG_SCEC1_0 _MMIO(0xdc08) +#define GEN12_OAG_SCEC1_1 _MMIO(0xdc0c) +#define GEN12_OAG_SCEC2_0 _MMIO(0xdc10) +#define GEN12_OAG_SCEC2_1 _MMIO(0xdc14) +#define GEN12_OAG_SCEC3_0 _MMIO(0xdc18) +#define GEN12_OAG_SCEC3_1 _MMIO(0xdc1c) +#define GEN12_OAG_SCEC4_0 _MMIO(0xdc20) +#define GEN12_OAG_SCEC4_1 _MMIO(0xdc24) +#define GEN12_OAG_SCEC5_0 _MMIO(0xdc28) +#define GEN12_OAG_SCEC5_1 _MMIO(0xdc2c) +#define GEN12_OAG_SCEC6_0 _MMIO(0xdc30) +#define GEN12_OAG_SCEC6_1 _MMIO(0xdc34) +#define GEN12_OAG_SCEC7_0 _MMIO(0xdc38) +#define GEN12_OAG_SCEC7_1 _MMIO(0xdc3c) + /* OA perf counters */ #define OA_PERFCNT1_LO _MMIO(0x91B8) #define OA_PERFCNT1_HI _MMIO(0x91BC) @@ -1042,6 +1146,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MICRO_BP3_COUNT_STATUS23 _MMIO(0x9838) #define MICRO_BP_FIRED_ARMED _MMIO(0x983C) +#define GEN12_OAA_DBG_REG _MMIO(0xdc44) +#define GEN12_OAG_OA_PESS _MMIO(0x2b2c) +#define GEN12_OAG_SPCTR_CNF _MMIO(0xdc40) + #define GDT_CHICKEN_BITS _MMIO(0x9840) #define GT_NOA_ENABLE 0x00000080 @@ -1962,8 +2070,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define ICL_DPHY_CHKN(port) 
_MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG) #define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7) -#define MG_PHY_PORT_LN(ln, port, ln0p1, ln0p2, ln1p1) \ - _MMIO(_PORT((port) - PORT_C, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) +#define MG_PHY_PORT_LN(ln, tc_port, ln0p1, ln0p2, ln1p1) \ + _MMIO(_PORT(tc_port, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) #define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C #define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C @@ -1973,10 +2081,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C #define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C #define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C -#define MG_TX1_LINK_PARAMS(ln, port) \ - MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ - MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ - MG_TX_LINK_PARAMS_TX1LN1_PORT1) +#define MG_TX1_LINK_PARAMS(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ + MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ + MG_TX_LINK_PARAMS_TX1LN1_PORT1) #define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC #define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC @@ -1986,10 +2094,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC #define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC #define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC -#define MG_TX2_LINK_PARAMS(ln, port) \ - MG_PHY_PORT_LN(ln, port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ - MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ - MG_TX_LINK_PARAMS_TX2LN1_PORT1) +#define MG_TX2_LINK_PARAMS(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ + MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ + MG_TX_LINK_PARAMS_TX2LN1_PORT1) #define CRI_USE_FS32 (1 << 5) #define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C @@ -2000,10 +2108,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C #define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C #define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C -#define MG_TX1_PISO_READLOAD(ln, port) \ - MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ - MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ - MG_TX_PISO_READLOAD_TX1LN1_PORT1) +#define MG_TX1_PISO_READLOAD(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ + MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ + MG_TX_PISO_READLOAD_TX1LN1_PORT1) #define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC #define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC @@ -2013,10 +2121,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC #define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC #define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC -#define MG_TX2_PISO_READLOAD(ln, port) \ - MG_PHY_PORT_LN(ln, port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ - MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ - MG_TX_PISO_READLOAD_TX2LN1_PORT1) +#define MG_TX2_PISO_READLOAD(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ + MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ + MG_TX_PISO_READLOAD_TX2LN1_PORT1) #define CRI_CALCINIT (1 << 1) #define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148 @@ -2027,10 +2135,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548 #define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148 #define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548 -#define MG_TX1_SWINGCTRL(ln, port) \ - MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \ - MG_TX_SWINGCTRL_TX1LN0_PORT2, \ - MG_TX_SWINGCTRL_TX1LN1_PORT1) +#define MG_TX1_SWINGCTRL(ln, tc_port) 
\ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \ + MG_TX_SWINGCTRL_TX1LN0_PORT2, \ + MG_TX_SWINGCTRL_TX1LN1_PORT1) #define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8 #define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8 @@ -2040,10 +2148,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8 #define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8 #define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8 -#define MG_TX2_SWINGCTRL(ln, port) \ - MG_PHY_PORT_LN(ln, port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ - MG_TX_SWINGCTRL_TX2LN0_PORT2, \ - MG_TX_SWINGCTRL_TX2LN1_PORT1) +#define MG_TX2_SWINGCTRL(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ + MG_TX_SWINGCTRL_TX2LN0_PORT2, \ + MG_TX_SWINGCTRL_TX2LN1_PORT1) #define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0) #define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0) @@ -2055,10 +2163,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544 #define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144 #define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544 -#define MG_TX1_DRVCTRL(ln, port) \ - MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \ - MG_TX_DRVCTRL_TX1LN0_TXPORT2, \ - MG_TX_DRVCTRL_TX1LN1_TXPORT1) +#define MG_TX1_DRVCTRL(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \ + MG_TX_DRVCTRL_TX1LN0_TXPORT2, \ + MG_TX_DRVCTRL_TX1LN1_TXPORT1) #define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4 #define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4 @@ -2068,10 +2176,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4 #define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4 #define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4 -#define MG_TX2_DRVCTRL(ln, port) \ - MG_PHY_PORT_LN(ln, port, MG_TX_DRVCTRL_TX2LN0_PORT1, \ - MG_TX_DRVCTRL_TX2LN0_PORT2, \ - MG_TX_DRVCTRL_TX2LN1_PORT1) +#define MG_TX2_DRVCTRL(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX2LN0_PORT1, \ + MG_TX_DRVCTRL_TX2LN0_PORT2, \ + MG_TX_DRVCTRL_TX2LN1_PORT1) #define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24) #define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24) #define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22) @@ -2088,10 +2196,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MG_CLKHUB_LN1_PORT3 0x16A79C #define MG_CLKHUB_LN0_PORT4 0x16B39C #define MG_CLKHUB_LN1_PORT4 0x16B79C -#define MG_CLKHUB(ln, port) \ - MG_PHY_PORT_LN(ln, port, MG_CLKHUB_LN0_PORT1, \ - MG_CLKHUB_LN0_PORT2, \ - MG_CLKHUB_LN1_PORT1) +#define MG_CLKHUB(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_CLKHUB_LN0_PORT1, \ + MG_CLKHUB_LN0_PORT2, \ + MG_CLKHUB_LN1_PORT1) #define CFG_LOW_RATE_LKREN_EN (1 << 11) #define MG_TX_DCC_TX1LN0_PORT1 0x168110 @@ -2102,10 +2210,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MG_TX_DCC_TX1LN1_PORT3 0x16A510 #define MG_TX_DCC_TX1LN0_PORT4 0x16B110 #define MG_TX_DCC_TX1LN1_PORT4 0x16B510 -#define MG_TX1_DCC(ln, port) \ - MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX1LN0_PORT1, \ - MG_TX_DCC_TX1LN0_PORT2, \ - MG_TX_DCC_TX1LN1_PORT1) +#define MG_TX1_DCC(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX1LN0_PORT1, \ + MG_TX_DCC_TX1LN0_PORT2, \ + MG_TX_DCC_TX1LN1_PORT1) #define MG_TX_DCC_TX2LN0_PORT1 0x168090 #define MG_TX_DCC_TX2LN1_PORT1 0x168490 #define MG_TX_DCC_TX2LN0_PORT2 0x169090 @@ -2114,10 +2222,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MG_TX_DCC_TX2LN1_PORT3 0x16A490 #define MG_TX_DCC_TX2LN0_PORT4 0x16B090 #define MG_TX_DCC_TX2LN1_PORT4 0x16B490 -#define MG_TX2_DCC(ln, port) \ - 
MG_PHY_PORT_LN(ln, port, MG_TX_DCC_TX2LN0_PORT1, \ - MG_TX_DCC_TX2LN0_PORT2, \ - MG_TX_DCC_TX2LN1_PORT1) +#define MG_TX2_DCC(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX2LN0_PORT1, \ + MG_TX_DCC_TX2LN0_PORT2, \ + MG_TX_DCC_TX2LN1_PORT1) #define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25) #define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25) #define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24) @@ -2130,10 +2238,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0 #define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0 #define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0 -#define MG_DP_MODE(ln, port) \ - MG_PHY_PORT_LN(ln, port, MG_DP_MODE_LN0_ACU_PORT1, \ - MG_DP_MODE_LN0_ACU_PORT2, \ - MG_DP_MODE_LN1_ACU_PORT1) +#define MG_DP_MODE(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_DP_MODE_LN0_ACU_PORT1, \ + MG_DP_MODE_LN0_ACU_PORT2, \ + MG_DP_MODE_LN1_ACU_PORT1) #define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) #define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6) #define MG_DP_MODE_CFG_TR2PWR_GATING (1 << 5) @@ -2172,13 +2280,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off)) /* ICL PHY DFLEX registers */ -#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0) -#define DFLEXDPMLE1_DPMLETC_MASK(tc_port) (0xf << (4 * (tc_port))) -#define DFLEXDPMLE1_DPMLETC_ML0(tc_port) (1 << (4 * (tc_port))) -#define DFLEXDPMLE1_DPMLETC_ML1_0(tc_port) (3 << (4 * (tc_port))) -#define DFLEXDPMLE1_DPMLETC_ML3(tc_port) (8 << (4 * (tc_port))) -#define DFLEXDPMLE1_DPMLETC_ML3_2(tc_port) (12 << (4 * (tc_port))) -#define DFLEXDPMLE1_DPMLETC_ML3_0(tc_port) (15 << (4 * (tc_port))) +#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0) +#define DFLEXDPMLE1_DPMLETC_MASK(idx) (0xf << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML0(idx) (1 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML1_0(idx) (3 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML3(idx) (8 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML3_2(idx) (12 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML3_0(idx) (15 << (4 * (idx))) /* BXT PHY Ref registers */ #define _PORT_REF_DW3_A 0x16218C @@ -2459,6 +2567,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define RING_FAULT_FAULT_TYPE(x) (((x) >> 1) & 0x3) #define RING_FAULT_VALID (1 << 0) #define DONE_REG _MMIO(0x40b0) +#define GEN12_GAM_DONE _MMIO(0xcf68) #define GEN8_PRIVATE_PAT_LO _MMIO(0x40e0) #define GEN8_PRIVATE_PAT_HI _MMIO(0x40e0 + 4) #define GEN10_PAT_INDEX(index) _MMIO(0x40e0 + (index) * 4) @@ -2489,7 +2598,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ +/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */ +#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8) +#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4) + #define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4) +#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2) #define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */ #define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28) #define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28) @@ -2602,6 +2716,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define FAULT_VA_HIGH_BITS (0xf << 0) #define FAULT_GTT_SEL (1 << 4) +#define GEN12_AUX_ERR_DBG _MMIO(0x43f4) + #define FPGA_DBG _MMIO(0x42300) #define FPGA_DBG_RM_NOCLAIM (1 << 31) @@ -2711,6 +2827,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) 
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030) #define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034) #define SCPD0 _MMIO(0x209c) /* 915+ only */ +#define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5) #define GEN2_IER _MMIO(0x20a0) #define GEN2_IIR _MMIO(0x20a4) #define GEN2_IMR _MMIO(0x20a8) @@ -2884,6 +3001,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050) #define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0) +#define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7) #define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) #define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1 << 10) @@ -2962,6 +3080,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN11_GT_SUBSLICE_DISABLE _MMIO(0x913C) +#define GEN12_GT_DSS_ENABLE _MMIO(0x913C) + #define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050) #define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) #define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) @@ -3564,6 +3684,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _PALETTE_A 0xa000 #define _PALETTE_B 0xa800 #define _CHV_PALETTE_C 0xc000 +#define PALETTE_RED_MASK REG_GENMASK(23, 16) +#define PALETTE_GREEN_MASK REG_GENMASK(15, 8) +#define PALETTE_BLUE_MASK REG_GENMASK(7, 0) #define PALETTE(pipe, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + \ _PICK((pipe), _PALETTE_A, \ _PALETTE_B, _CHV_PALETTE_C) + \ @@ -4044,10 +4167,15 @@ enum { #define SARBUNIT_CLKGATE_DIS (1 << 5) #define RCCUNIT_CLKGATE_DIS (1 << 7) #define MSCUNIT_CLKGATE_DIS (1 << 10) +#define L3_CLKGATE_DIS REG_BIT(16) +#define L3_CR2X_CLKGATE_DIS REG_BIT(17) #define SUBSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9524) #define GWUNIT_CLKGATE_DIS (1 << 16) +#define SUBSLICE_UNIT_LEVEL_CLKGATE2 _MMIO(0x9528) +#define CPSSUNIT_CLKGATE_DIS REG_BIT(9) + #define UNSLICE_UNIT_LEVEL_CLKGATE _MMIO(0x9434) #define VFUNIT_CLKGATE_DIS (1 << 20) @@ -4141,6 +4269,7 @@ enum { #define _VTOTAL_A 0x6000c #define _VBLANK_A 0x60010 #define _VSYNC_A 0x60014 +#define _EXITLINE_A 0x60018 #define _PIPEASRC 0x6001c #define _BCLRPAT_A 0x60020 #define _VSYNCSHIFT_A 0x60028 @@ -4192,10 +4321,22 @@ enum { #define PIPESRC(trans) _MMIO_TRANS2(trans, _PIPEASRC) #define PIPE_MULT(trans) _MMIO_TRANS2(trans, _PIPE_MULT_A) -/* HSW+ eDP PSR registers */ -#define HSW_EDP_PSR_BASE 0x64800 -#define BDW_EDP_PSR_BASE 0x6f800 -#define EDP_PSR_CTL _MMIO(dev_priv->psr_mmio_base + 0) +#define EXITLINE(trans) _MMIO_TRANS2(trans, _EXITLINE_A) +#define EXITLINE_ENABLE REG_BIT(31) +#define EXITLINE_MASK REG_GENMASK(12, 0) +#define EXITLINE_SHIFT 0 + +/* + * HSW+ eDP PSR registers + * + * HSW PSR registers are relative to DDIA(_DDI_BUF_CTL_A + 0x800) with just one + * instance of it + */ +#define _HSW_EDP_PSR_BASE 0x64800 +#define _SRD_CTL_A 0x60800 +#define _SRD_CTL_EDP 0x6f800 +#define _PSR_ADJ(tran, reg) (_TRANS2(tran, reg) - dev_priv->hsw_psr_mmio_adjust) +#define EDP_PSR_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_CTL_A)) #define EDP_PSR_ENABLE (1 << 31) #define BDW_PSR_SINGLE_FRAME (1 << 30) #define EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK (1 << 29) /* SW can't modify */ @@ -4221,27 +4362,40 @@ enum { #define EDP_PSR_TP1_TIME_0us (3 << 4) #define EDP_PSR_IDLE_FRAME_SHIFT 0 -/* Bspec claims those aren't shifted but stay at 0x64800 */ +/* + * Until TGL, IMR/IIR are fixed at 0x648xx. On TGL+ those registers are relative + * to transcoder and bits defined for each one as if using no shift (i.e. 
as if + * it was for TRANSCODER_EDP) + */ #define EDP_PSR_IMR _MMIO(0x64834) #define EDP_PSR_IIR _MMIO(0x64838) -#define EDP_PSR_ERROR(shift) (1 << ((shift) + 2)) -#define EDP_PSR_POST_EXIT(shift) (1 << ((shift) + 1)) -#define EDP_PSR_PRE_ENTRY(shift) (1 << (shift)) -#define EDP_PSR_TRANSCODER_C_SHIFT 24 -#define EDP_PSR_TRANSCODER_B_SHIFT 16 -#define EDP_PSR_TRANSCODER_A_SHIFT 8 -#define EDP_PSR_TRANSCODER_EDP_SHIFT 0 - -#define EDP_PSR_AUX_CTL _MMIO(dev_priv->psr_mmio_base + 0x10) +#define _PSR_IMR_A 0x60814 +#define _PSR_IIR_A 0x60818 +#define TRANS_PSR_IMR(tran) _MMIO_TRANS2(tran, _PSR_IMR_A) +#define TRANS_PSR_IIR(tran) _MMIO_TRANS2(tran, _PSR_IIR_A) +#define _EDP_PSR_TRANS_SHIFT(trans) ((trans) == TRANSCODER_EDP ? \ + 0 : ((trans) - TRANSCODER_A + 1) * 8) +#define EDP_PSR_TRANS_MASK(trans) (0x7 << _EDP_PSR_TRANS_SHIFT(trans)) +#define EDP_PSR_ERROR(trans) (0x4 << _EDP_PSR_TRANS_SHIFT(trans)) +#define EDP_PSR_POST_EXIT(trans) (0x2 << _EDP_PSR_TRANS_SHIFT(trans)) +#define EDP_PSR_PRE_ENTRY(trans) (0x1 << _EDP_PSR_TRANS_SHIFT(trans)) + +#define _SRD_AUX_CTL_A 0x60810 +#define _SRD_AUX_CTL_EDP 0x6f810 +#define EDP_PSR_AUX_CTL(tran) _MMIO(_PSR_ADJ(tran, _SRD_AUX_CTL_A)) #define EDP_PSR_AUX_CTL_TIME_OUT_MASK (3 << 26) #define EDP_PSR_AUX_CTL_MESSAGE_SIZE_MASK (0x1f << 20) #define EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK (0xf << 16) #define EDP_PSR_AUX_CTL_ERROR_INTERRUPT (1 << 11) #define EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK (0x7ff) -#define EDP_PSR_AUX_DATA(i) _MMIO(dev_priv->psr_mmio_base + 0x14 + (i) * 4) /* 5 registers */ +#define _SRD_AUX_DATA_A 0x60814 +#define _SRD_AUX_DATA_EDP 0x6f814 +#define EDP_PSR_AUX_DATA(tran, i) _MMIO(_PSR_ADJ(tran, _SRD_AUX_DATA_A) + (i) + 4) /* 5 registers */ -#define EDP_PSR_STATUS _MMIO(dev_priv->psr_mmio_base + 0x40) +#define _SRD_STATUS_A 0x60840 +#define _SRD_STATUS_EDP 0x6f840 +#define EDP_PSR_STATUS(tran) _MMIO(_PSR_ADJ(tran, _SRD_STATUS_A)) #define EDP_PSR_STATUS_STATE_MASK (7 << 29) #define EDP_PSR_STATUS_STATE_SHIFT 29 #define EDP_PSR_STATUS_STATE_IDLE (0 << 29) @@ -4266,10 +4420,15 @@ enum { #define EDP_PSR_STATUS_SENDING_TP1 (1 << 4) #define EDP_PSR_STATUS_IDLE_MASK 0xf -#define EDP_PSR_PERF_CNT _MMIO(dev_priv->psr_mmio_base + 0x44) +#define _SRD_PERF_CNT_A 0x60844 +#define _SRD_PERF_CNT_EDP 0x6f844 +#define EDP_PSR_PERF_CNT(tran) _MMIO(_PSR_ADJ(tran, _SRD_PERF_CNT_A)) #define EDP_PSR_PERF_CNT_MASK 0xffffff -#define EDP_PSR_DEBUG _MMIO(dev_priv->psr_mmio_base + 0x60) /* PSR_MASK on SKL+ */ +/* PSR_MASK on SKL+ */ +#define _SRD_DEBUG_A 0x60860 +#define _SRD_DEBUG_EDP 0x6f860 +#define EDP_PSR_DEBUG(tran) _MMIO(_PSR_ADJ(tran, _SRD_DEBUG_A)) #define EDP_PSR_DEBUG_MASK_MAX_SLEEP (1 << 28) #define EDP_PSR_DEBUG_MASK_LPSP (1 << 27) #define EDP_PSR_DEBUG_MASK_MEMUP (1 << 26) @@ -4277,7 +4436,9 @@ enum { #define EDP_PSR_DEBUG_MASK_DISP_REG_WRITE (1 << 16) /* Reserved in ICL+ */ #define EDP_PSR_DEBUG_EXIT_ON_PIXEL_UNDERRUN (1 << 15) /* SKL+ */ -#define EDP_PSR2_CTL _MMIO(0x6f900) +#define _PSR2_CTL_A 0x60900 +#define _PSR2_CTL_EDP 0x6f900 +#define EDP_PSR2_CTL(tran) _MMIO_TRANS2(tran, _PSR2_CTL_A) #define EDP_PSR2_ENABLE (1 << 31) #define EDP_SU_TRACK_ENABLE (1 << 30) #define EDP_Y_COORDINATE_VALID (1 << 26) /* GLK and CNL+ */ @@ -4299,8 +4460,8 @@ enum { #define _PSR_EVENT_TRANS_B 0x61848 #define _PSR_EVENT_TRANS_C 0x62848 #define _PSR_EVENT_TRANS_D 0x63848 -#define _PSR_EVENT_TRANS_EDP 0x6F848 -#define PSR_EVENT(trans) _MMIO_TRANS2(trans, _PSR_EVENT_TRANS_A) +#define _PSR_EVENT_TRANS_EDP 0x6f848 +#define PSR_EVENT(tran) _MMIO_TRANS2(tran, _PSR_EVENT_TRANS_A) 
#define PSR_EVENT_PSR2_WD_TIMER_EXPIRE (1 << 17) #define PSR_EVENT_PSR2_DISABLED (1 << 16) #define PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN (1 << 15) @@ -4318,15 +4479,16 @@ enum { #define PSR_EVENT_LPSP_MODE_EXIT (1 << 1) #define PSR_EVENT_PSR_DISABLE (1 << 0) -#define EDP_PSR2_STATUS _MMIO(0x6f940) +#define _PSR2_STATUS_A 0x60940 +#define _PSR2_STATUS_EDP 0x6f940 +#define EDP_PSR2_STATUS(tran) _MMIO_TRANS2(tran, _PSR2_STATUS_A) #define EDP_PSR2_STATUS_STATE_MASK (0xf << 28) #define EDP_PSR2_STATUS_STATE_SHIFT 28 -#define _PSR2_SU_STATUS_0 0x6F914 -#define _PSR2_SU_STATUS_1 0x6F918 -#define _PSR2_SU_STATUS_2 0x6F91C -#define _PSR2_SU_STATUS(index) _MMIO(_PICK_EVEN((index), _PSR2_SU_STATUS_0, _PSR2_SU_STATUS_1)) -#define PSR2_SU_STATUS(frame) (_PSR2_SU_STATUS((frame) / 3)) +#define _PSR2_SU_STATUS_A 0x60914 +#define _PSR2_SU_STATUS_EDP 0x6f914 +#define _PSR2_SU_STATUS(tran, index) _MMIO(_TRANS2(tran, _PSR2_SU_STATUS_A) + (index) * 4) +#define PSR2_SU_STATUS(tran, frame) (_PSR2_SU_STATUS(tran, (frame) / 3)) #define PSR2_SU_STATUS_SHIFT(frame) (((frame) % 3) * 10) #define PSR2_SU_STATUS_MASK(frame) (0x3ff << PSR2_SU_STATUS_SHIFT(frame)) #define PSR2_SU_STATUS_FRAMES 8 @@ -4652,6 +4814,7 @@ enum { * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte * of the infoframe structure specified by CEA-861. */ #define VIDEO_DIP_DATA_SIZE 32 +#define VIDEO_DIP_GMP_DATA_SIZE 36 #define VIDEO_DIP_VSC_DATA_SIZE 36 #define VIDEO_DIP_PPS_DATA_SIZE 132 #define VIDEO_DIP_CTL _MMIO(0x61170) @@ -5488,45 +5651,9 @@ enum { */ #define _DPA_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64010) #define _DPA_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64014) -#define _DPA_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64018) -#define _DPA_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6401c) -#define _DPA_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64020) -#define _DPA_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64024) #define _DPB_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64110) #define _DPB_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64114) -#define _DPB_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64118) -#define _DPB_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6411c) -#define _DPB_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64120) -#define _DPB_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64124) - -#define _DPC_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64210) -#define _DPC_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64214) -#define _DPC_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64218) -#define _DPC_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6421c) -#define _DPC_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64220) -#define _DPC_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64224) - -#define _DPD_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64310) -#define _DPD_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64314) -#define _DPD_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64318) -#define _DPD_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6431c) -#define _DPD_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64320) -#define _DPD_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64324) - -#define _DPE_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64410) -#define _DPE_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64414) -#define _DPE_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64418) -#define _DPE_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6441c) -#define _DPE_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64420) -#define _DPE_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64424) - 
-#define _DPF_AUX_CH_CTL (DISPLAY_MMIO_BASE(dev_priv) + 0x64510) -#define _DPF_AUX_CH_DATA1 (DISPLAY_MMIO_BASE(dev_priv) + 0x64514) -#define _DPF_AUX_CH_DATA2 (DISPLAY_MMIO_BASE(dev_priv) + 0x64518) -#define _DPF_AUX_CH_DATA3 (DISPLAY_MMIO_BASE(dev_priv) + 0x6451c) -#define _DPF_AUX_CH_DATA4 (DISPLAY_MMIO_BASE(dev_priv) + 0x64520) -#define _DPF_AUX_CH_DATA5 (DISPLAY_MMIO_BASE(dev_priv) + 0x64524) #define DP_AUX_CH_CTL(aux_ch) _MMIO_PORT(aux_ch, _DPA_AUX_CH_CTL, _DPB_AUX_CH_CTL) #define DP_AUX_CH_DATA(aux_ch, i) _MMIO(_PORT(aux_ch, _DPA_AUX_CH_DATA1, _DPB_AUX_CH_DATA1) + (i) * 4) /* 5 registers */ @@ -5658,6 +5785,11 @@ enum { #define PIPECONF_CXSR_DOWNCLOCK (1 << 16) #define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14) #define PIPECONF_COLOR_RANGE_SELECT (1 << 13) +#define PIPECONF_OUTPUT_COLORSPACE_MASK (3 << 11) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_RGB (0 << 11) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_YUV601 (1 << 11) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_YUV709 (2 << 11) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW (1 << 11) /* hsw only */ #define PIPECONF_BPC_MASK (0x7 << 5) #define PIPECONF_8BPC (0 << 5) #define PIPECONF_10BPC (1 << 5) @@ -5745,12 +5877,13 @@ enum { #define _PIPEAGCMAX 0x70010 #define _PIPEBGCMAX 0x71010 +#define PIPEGCMAX_RGB_MASK REG_GENMASK(15, 0) #define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4) #define _PIPE_MISC_A 0x70030 #define _PIPE_MISC_B 0x71030 -#define PIPEMISC_YUV420_ENABLE (1 << 27) -#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) +#define PIPEMISC_YUV420_ENABLE (1 << 27) /* glk+ */ +#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */ #define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */ #define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11) #define PIPEMISC_DITHER_BPC_MASK (7 << 5) @@ -6207,6 +6340,7 @@ enum { #define CHV_CURSOR_C_OFFSET 0x700e0 #define IVB_CURSOR_B_OFFSET 0x71080 #define IVB_CURSOR_C_OFFSET 0x72080 +#define TGL_CURSOR_D_OFFSET 0x73080 /* Display A control */ #define _DSPACNTR 0x70180 @@ -7177,11 +7311,17 @@ enum { /* legacy palette */ #define _LGC_PALETTE_A 0x4a000 #define _LGC_PALETTE_B 0x4a800 +#define LGC_PALETTE_RED_MASK REG_GENMASK(23, 16) +#define LGC_PALETTE_GREEN_MASK REG_GENMASK(15, 8) +#define LGC_PALETTE_BLUE_MASK REG_GENMASK(7, 0) #define LGC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4) /* ilk/snb precision palette */ #define _PREC_PALETTE_A 0x4b000 #define _PREC_PALETTE_B 0x4c000 +#define PREC_PALETTE_RED_MASK REG_GENMASK(29, 20) +#define PREC_PALETTE_GREEN_MASK REG_GENMASK(19, 10) +#define PREC_PALETTE_BLUE_MASK REG_GENMASK(9, 0) #define PREC_PALETTE(pipe, i) _MMIO(_PIPE(pipe, _PREC_PALETTE_A, _PREC_PALETTE_B) + (i) * 4) #define _PREC_PIPEAGCMAX 0x4d000 @@ -7217,6 +7357,8 @@ enum { #define TGL_DMC_DEBUG_DC5_COUNT _MMIO(0x101084) #define TGL_DMC_DEBUG_DC6_COUNT _MMIO(0x101088) +#define DMC_DEBUG3 _MMIO(0x101090) + /* Display Internal Timeout Register */ #define RM_TIMEOUT _MMIO(0x42060) #define MMIO_TIMEOUT_US(us) ((us) << 0) @@ -7332,6 +7474,9 @@ enum { #define GEN8_PIPE_VSYNC (1 << 1) #define GEN8_PIPE_VBLANK (1 << 0) #define GEN9_PIPE_CURSOR_FAULT (1 << 11) +#define GEN11_PIPE_PLANE7_FAULT (1 << 22) +#define GEN11_PIPE_PLANE6_FAULT (1 << 21) +#define GEN11_PIPE_PLANE5_FAULT (1 << 20) #define GEN9_PIPE_PLANE4_FAULT (1 << 10) #define GEN9_PIPE_PLANE3_FAULT (1 << 9) #define GEN9_PIPE_PLANE2_FAULT (1 << 8) @@ -7351,6 +7496,11 @@ enum { GEN9_PIPE_PLANE3_FAULT | \ GEN9_PIPE_PLANE2_FAULT | \ GEN9_PIPE_PLANE1_FAULT) +#define 
GEN11_DE_PIPE_IRQ_FAULT_ERRORS \ + (GEN9_DE_PIPE_IRQ_FAULT_ERRORS | \ + GEN11_PIPE_PLANE7_FAULT | \ + GEN11_PIPE_PLANE6_FAULT | \ + GEN11_PIPE_PLANE5_FAULT) #define GEN8_DE_PORT_ISR _MMIO(0x44440) #define GEN8_DE_PORT_IMR _MMIO(0x44444) @@ -7370,6 +7520,12 @@ enum { #define GEN8_PORT_DP_A_HOTPLUG (1 << 3) #define BXT_DE_PORT_GMBUS (1 << 1) #define GEN8_AUX_CHANNEL_A (1 << 0) +#define TGL_DE_PORT_AUX_USBC6 (1 << 13) +#define TGL_DE_PORT_AUX_USBC5 (1 << 12) +#define TGL_DE_PORT_AUX_USBC4 (1 << 11) +#define TGL_DE_PORT_AUX_USBC3 (1 << 10) +#define TGL_DE_PORT_AUX_USBC2 (1 << 9) +#define TGL_DE_PORT_AUX_USBC1 (1 << 8) #define TGL_DE_PORT_AUX_DDIC (1 << 2) #define TGL_DE_PORT_AUX_DDIB (1 << 1) #define TGL_DE_PORT_AUX_DDIA (1 << 0) @@ -7558,10 +7714,17 @@ enum { #define BDW_DPRS_MASK_VBLANK_SRD (1 << 0) #define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B) -#define CHICKEN_TRANS_A _MMIO(0x420c0) -#define CHICKEN_TRANS_B _MMIO(0x420c4) -#define CHICKEN_TRANS_C _MMIO(0x420c8) -#define CHICKEN_TRANS_EDP _MMIO(0x420cc) +#define _CHICKEN_TRANS_A 0x420c0 +#define _CHICKEN_TRANS_B 0x420c4 +#define _CHICKEN_TRANS_C 0x420c8 +#define _CHICKEN_TRANS_EDP 0x420cc +#define _CHICKEN_TRANS_D 0x420d8 +#define CHICKEN_TRANS(trans) _MMIO(_PICK((trans), \ + [TRANSCODER_EDP] = _CHICKEN_TRANS_EDP, \ + [TRANSCODER_A] = _CHICKEN_TRANS_A, \ + [TRANSCODER_B] = _CHICKEN_TRANS_B, \ + [TRANSCODER_C] = _CHICKEN_TRANS_C, \ + [TRANSCODER_D] = _CHICKEN_TRANS_D)) #define VSC_DATA_SEL_SOFTWARE_CONTROL (1 << 25) /* GLK and CNL+ */ #define DDI_TRAINING_OVERRIDE_ENABLE (1 << 19) #define DDI_TRAINING_OVERRIDE_VALUE (1 << 18) @@ -7594,15 +7757,19 @@ enum { #define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7) #define SKL_DFSM _MMIO(0x51000) -#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) -#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23) -#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23) -#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23) -#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23) -#define SKL_DFSM_PIPE_A_DISABLE (1 << 30) -#define SKL_DFSM_PIPE_B_DISABLE (1 << 21) -#define SKL_DFSM_PIPE_C_DISABLE (1 << 28) -#define TGL_DFSM_PIPE_D_DISABLE (1 << 22) +#define SKL_DFSM_DISPLAY_PM_DISABLE (1 << 27) +#define SKL_DFSM_DISPLAY_HDCP_DISABLE (1 << 25) +#define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) +#define SKL_DFSM_CDCLK_LIMIT_675 (0 << 23) +#define SKL_DFSM_CDCLK_LIMIT_540 (1 << 23) +#define SKL_DFSM_CDCLK_LIMIT_450 (2 << 23) +#define SKL_DFSM_CDCLK_LIMIT_337_5 (3 << 23) +#define ICL_DFSM_DMC_DISABLE (1 << 23) +#define SKL_DFSM_PIPE_A_DISABLE (1 << 30) +#define SKL_DFSM_PIPE_B_DISABLE (1 << 21) +#define SKL_DFSM_PIPE_C_DISABLE (1 << 28) +#define TGL_DFSM_PIPE_D_DISABLE (1 << 22) +#define CNL_DFSM_DISPLAY_DSC_DISABLE (1 << 7) #define SKL_DSSM _MMIO(0x51004) #define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31) @@ -7619,7 +7786,10 @@ enum { #define GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE (1 << 10) #define GEN9_CS_DEBUG_MODE1 _MMIO(0x20ec) +#define FF_DOP_CLOCK_GATE_DISABLE REG_BIT(1) #define GEN9_CTX_PREEMPT_REG _MMIO(0x2248) +#define GEN12_DISABLE_POSH_BUSY_FF_DOP_CG REG_BIT(11) + #define GEN8_CS_CHICKEN1 _MMIO(0x2580) #define GEN9_PREEMPT_3D_OBJECT_LEVEL (1 << 0) #define GEN9_PREEMPT_GPGPU_LEVEL(hi, lo) (((hi) << 2) | ((lo) << 1)) @@ -7644,6 +7814,7 @@ enum { #define GEN11_COMMON_SLICE_CHICKEN3 _MMIO(0x7304) #define GEN11_BLEND_EMB_FIX_DISABLE_IN_RCC (1 << 11) + #define GEN12_DISABLE_CPS_AWARE_COLOR_PIPE (1 << 9) #define HIZ_CHICKEN _MMIO(0x7018) # define CHV_HZ_8X8_MODE_IN_1X (1 << 15) @@ -7828,29 +7999,24 @@ enum { SDE_FDI_RXA_CPT) /* 
south display engine interrupt: ICP/TGP */ -#define SDE_TC6_HOTPLUG_TGP (1 << 29) -#define SDE_TC5_HOTPLUG_TGP (1 << 28) -#define SDE_TC4_HOTPLUG_ICP (1 << 27) -#define SDE_TC3_HOTPLUG_ICP (1 << 26) -#define SDE_TC2_HOTPLUG_ICP (1 << 25) -#define SDE_TC1_HOTPLUG_ICP (1 << 24) #define SDE_GMBUS_ICP (1 << 23) -#define SDE_DDIC_HOTPLUG_TGP (1 << 18) -#define SDE_DDIB_HOTPLUG_ICP (1 << 17) -#define SDE_DDIA_HOTPLUG_ICP (1 << 16) #define SDE_TC_HOTPLUG_ICP(tc_port) (1 << ((tc_port) + 24)) #define SDE_DDI_HOTPLUG_ICP(port) (1 << ((port) + 16)) -#define SDE_DDI_MASK_ICP (SDE_DDIB_HOTPLUG_ICP | \ - SDE_DDIA_HOTPLUG_ICP) -#define SDE_TC_MASK_ICP (SDE_TC4_HOTPLUG_ICP | \ - SDE_TC3_HOTPLUG_ICP | \ - SDE_TC2_HOTPLUG_ICP | \ - SDE_TC1_HOTPLUG_ICP) -#define SDE_DDI_MASK_TGP (SDE_DDIC_HOTPLUG_TGP | \ - SDE_DDI_MASK_ICP) -#define SDE_TC_MASK_TGP (SDE_TC6_HOTPLUG_TGP | \ - SDE_TC5_HOTPLUG_TGP | \ - SDE_TC_MASK_ICP) +#define SDE_DDI_MASK_ICP (SDE_DDI_HOTPLUG_ICP(PORT_B) | \ + SDE_DDI_HOTPLUG_ICP(PORT_A)) +#define SDE_TC_MASK_ICP (SDE_TC_HOTPLUG_ICP(PORT_TC4) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC3) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC2) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC1)) +#define SDE_DDI_MASK_TGP (SDE_DDI_HOTPLUG_ICP(PORT_C) | \ + SDE_DDI_HOTPLUG_ICP(PORT_B) | \ + SDE_DDI_HOTPLUG_ICP(PORT_A)) +#define SDE_TC_MASK_TGP (SDE_TC_HOTPLUG_ICP(PORT_TC6) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC5) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC4) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC3) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC2) | \ + SDE_TC_HOTPLUG_ICP(PORT_TC1)) #define SDEISR _MMIO(0xc4000) #define SDEIMR _MMIO(0xc4004) @@ -7917,26 +8083,13 @@ enum { * SHOTPLUG_CTL_DDI and SHOTPLUG_CTL_TC. */ -#define SHOTPLUG_CTL_DDI _MMIO(0xc4030) -#define TGP_DDIC_HPD_ENABLE (1 << 11) -#define TGP_DDIC_HPD_STATUS_MASK (3 << 8) -#define TGP_DDIC_HPD_NO_DETECT (0 << 8) -#define TGP_DDIC_HPD_SHORT_DETECT (1 << 8) -#define TGP_DDIC_HPD_LONG_DETECT (2 << 8) -#define TGP_DDIC_HPD_SHORT_LONG_DETECT (3 << 8) -#define ICP_DDIB_HPD_ENABLE (1 << 7) -#define ICP_DDIB_HPD_STATUS_MASK (3 << 4) -#define ICP_DDIB_HPD_NO_DETECT (0 << 4) -#define ICP_DDIB_HPD_SHORT_DETECT (1 << 4) -#define ICP_DDIB_HPD_LONG_DETECT (2 << 4) -#define ICP_DDIB_HPD_SHORT_LONG_DETECT (3 << 4) -#define ICP_DDIA_HPD_ENABLE (1 << 3) -#define ICP_DDIA_HPD_OP_DRIVE_1 (1 << 2) -#define ICP_DDIA_HPD_STATUS_MASK (3 << 0) -#define ICP_DDIA_HPD_NO_DETECT (0 << 0) -#define ICP_DDIA_HPD_SHORT_DETECT (1 << 0) -#define ICP_DDIA_HPD_LONG_DETECT (2 << 0) -#define ICP_DDIA_HPD_SHORT_LONG_DETECT (3 << 0) +#define SHOTPLUG_CTL_DDI _MMIO(0xc4030) +#define SHOTPLUG_CTL_DDI_HPD_ENABLE(port) (0x8 << (4 * (port))) +#define SHOTPLUG_CTL_DDI_HPD_STATUS_MASK(port) (0x3 << (4 * (port))) +#define SHOTPLUG_CTL_DDI_HPD_NO_DETECT(port) (0x0 << (4 * (port))) +#define SHOTPLUG_CTL_DDI_HPD_SHORT_DETECT(port) (0x1 << (4 * (port))) +#define SHOTPLUG_CTL_DDI_HPD_LONG_DETECT(port) (0x2 << (4 * (port))) +#define SHOTPLUG_CTL_DDI_HPD_SHORT_LONG_DETECT(port) (0x3 << (4 * (port))) #define SHOTPLUG_CTL_TC _MMIO(0xc4034) #define ICP_TC_HPD_ENABLE(tc_port) (8 << (tc_port) * 4) @@ -8047,14 +8200,15 @@ enum { #define ICP_TC_HPD_LONG_DETECT(tc_port) (2 << (tc_port) * 4) #define ICP_TC_HPD_SHORT_DETECT(tc_port) (1 << (tc_port) * 4) -#define ICP_DDI_HPD_ENABLE_MASK (ICP_DDIB_HPD_ENABLE | \ - ICP_DDIA_HPD_ENABLE) +#define ICP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \ + SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A)) #define ICP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC4) | \ ICP_TC_HPD_ENABLE(PORT_TC3) | \ ICP_TC_HPD_ENABLE(PORT_TC2) | \ 
ICP_TC_HPD_ENABLE(PORT_TC1)) -#define TGP_DDI_HPD_ENABLE_MASK (TGP_DDIC_HPD_ENABLE | \ - ICP_DDI_HPD_ENABLE_MASK) +#define TGP_DDI_HPD_ENABLE_MASK (SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_C) | \ + SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_B) | \ + SHOTPLUG_CTL_DDI_HPD_ENABLE(PORT_A)) #define TGP_TC_HPD_ENABLE_MASK (ICP_TC_HPD_ENABLE(PORT_TC6) | \ ICP_TC_HPD_ENABLE(PORT_TC5) | \ ICP_TC_HPD_ENABLE_MASK) @@ -8604,6 +8758,10 @@ enum { #define GEN9_PWRGT_MEDIA_STATUS_MASK (1 << 0) #define GEN9_PWRGT_RENDER_STATUS_MASK (1 << 1) +#define POWERGATE_ENABLE _MMIO(0xa210) +#define VDN_HCP_POWERGATE_ENABLE(n) BIT(((n) * 2) + 3) +#define VDN_MFX_POWERGATE_ENABLE(n) BIT(((n) * 2) + 4) + #define GTFIFODBG _MMIO(0x120000) #define GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV (0x1f << 20) #define GT_FIFO_FREE_ENTRIES_CHV (0x7f << 13) @@ -8841,6 +8999,7 @@ enum { #define GEN9_SAGV_DISABLE 0x0 #define GEN9_SAGV_IS_DISABLED 0x1 #define GEN9_SAGV_ENABLE 0x3 +#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23 #define GEN6_PCODE_DATA _MMIO(0x138128) #define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8 #define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16 @@ -9104,6 +9263,10 @@ enum { #define HSW_AUD_CHICKENBIT _MMIO(0x65f10) #define SKL_AUD_CODEC_WAKE_SIGNAL (1 << 15) +#define AUD_FREQ_CNTRL _MMIO(0x65900) +#define AUD_PIN_BUF_CTL _MMIO(0x48414) +#define AUD_PIN_BUF_ENABLE REG_BIT(31) + /* * HSW - ICL power wells * @@ -9266,12 +9429,20 @@ enum skl_power_gate { /* HDCP Repeater Registers */ #define HDCP_REP_CTL _MMIO(0x66d00) +#define HDCP_TRANSA_REP_PRESENT BIT(31) +#define HDCP_TRANSB_REP_PRESENT BIT(30) +#define HDCP_TRANSC_REP_PRESENT BIT(29) +#define HDCP_TRANSD_REP_PRESENT BIT(28) #define HDCP_DDIB_REP_PRESENT BIT(30) #define HDCP_DDIA_REP_PRESENT BIT(29) #define HDCP_DDIC_REP_PRESENT BIT(28) #define HDCP_DDID_REP_PRESENT BIT(27) #define HDCP_DDIF_REP_PRESENT BIT(26) #define HDCP_DDIE_REP_PRESENT BIT(25) +#define HDCP_TRANSA_SHA1_M0 (1 << 20) +#define HDCP_TRANSB_SHA1_M0 (2 << 20) +#define HDCP_TRANSC_SHA1_M0 (3 << 20) +#define HDCP_TRANSD_SHA1_M0 (4 << 20) #define HDCP_DDIB_SHA1_M0 (1 << 20) #define HDCP_DDIA_SHA1_M0 (2 << 20) #define HDCP_DDIC_SHA1_M0 (3 << 20) @@ -9311,15 +9482,92 @@ enum skl_power_gate { _PORTE_HDCP_AUTHENC, \ _PORTF_HDCP_AUTHENC) + (x)) #define PORT_HDCP_CONF(port) _PORT_HDCP_AUTHENC(port, 0x0) +#define _TRANSA_HDCP_CONF 0x66400 +#define _TRANSB_HDCP_CONF 0x66500 +#define TRANS_HDCP_CONF(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_CONF, \ + _TRANSB_HDCP_CONF) +#define HDCP_CONF(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_CONF(trans) : \ + PORT_HDCP_CONF(port)) + #define HDCP_CONF_CAPTURE_AN BIT(0) #define HDCP_CONF_AUTH_AND_ENC (BIT(1) | BIT(0)) #define PORT_HDCP_ANINIT(port) _PORT_HDCP_AUTHENC(port, 0x4) +#define _TRANSA_HDCP_ANINIT 0x66404 +#define _TRANSB_HDCP_ANINIT 0x66504 +#define TRANS_HDCP_ANINIT(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_ANINIT, \ + _TRANSB_HDCP_ANINIT) +#define HDCP_ANINIT(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_ANINIT(trans) : \ + PORT_HDCP_ANINIT(port)) + #define PORT_HDCP_ANLO(port) _PORT_HDCP_AUTHENC(port, 0x8) +#define _TRANSA_HDCP_ANLO 0x66408 +#define _TRANSB_HDCP_ANLO 0x66508 +#define TRANS_HDCP_ANLO(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANLO, \ + _TRANSB_HDCP_ANLO) +#define HDCP_ANLO(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? 
\ + TRANS_HDCP_ANLO(trans) : \ + PORT_HDCP_ANLO(port)) + #define PORT_HDCP_ANHI(port) _PORT_HDCP_AUTHENC(port, 0xC) +#define _TRANSA_HDCP_ANHI 0x6640C +#define _TRANSB_HDCP_ANHI 0x6650C +#define TRANS_HDCP_ANHI(trans) _MMIO_TRANS(trans, _TRANSA_HDCP_ANHI, \ + _TRANSB_HDCP_ANHI) +#define HDCP_ANHI(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_ANHI(trans) : \ + PORT_HDCP_ANHI(port)) + #define PORT_HDCP_BKSVLO(port) _PORT_HDCP_AUTHENC(port, 0x10) +#define _TRANSA_HDCP_BKSVLO 0x66410 +#define _TRANSB_HDCP_BKSVLO 0x66510 +#define TRANS_HDCP_BKSVLO(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_BKSVLO, \ + _TRANSB_HDCP_BKSVLO) +#define HDCP_BKSVLO(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_BKSVLO(trans) : \ + PORT_HDCP_BKSVLO(port)) + #define PORT_HDCP_BKSVHI(port) _PORT_HDCP_AUTHENC(port, 0x14) +#define _TRANSA_HDCP_BKSVHI 0x66414 +#define _TRANSB_HDCP_BKSVHI 0x66514 +#define TRANS_HDCP_BKSVHI(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_BKSVHI, \ + _TRANSB_HDCP_BKSVHI) +#define HDCP_BKSVHI(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_BKSVHI(trans) : \ + PORT_HDCP_BKSVHI(port)) + #define PORT_HDCP_RPRIME(port) _PORT_HDCP_AUTHENC(port, 0x18) +#define _TRANSA_HDCP_RPRIME 0x66418 +#define _TRANSB_HDCP_RPRIME 0x66518 +#define TRANS_HDCP_RPRIME(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_RPRIME, \ + _TRANSB_HDCP_RPRIME) +#define HDCP_RPRIME(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_RPRIME(trans) : \ + PORT_HDCP_RPRIME(port)) + #define PORT_HDCP_STATUS(port) _PORT_HDCP_AUTHENC(port, 0x1C) +#define _TRANSA_HDCP_STATUS 0x6641C +#define _TRANSB_HDCP_STATUS 0x6651C +#define TRANS_HDCP_STATUS(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP_STATUS, \ + _TRANSB_HDCP_STATUS) +#define HDCP_STATUS(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP_STATUS(trans) : \ + PORT_HDCP_STATUS(port)) + #define HDCP_STATUS_STREAM_A_ENC BIT(31) #define HDCP_STATUS_STREAM_B_ENC BIT(30) #define HDCP_STATUS_STREAM_C_ENC BIT(29) @@ -9346,23 +9594,44 @@ enum skl_power_gate { _PORTD_HDCP2_BASE, \ _PORTE_HDCP2_BASE, \ _PORTF_HDCP2_BASE) + (x)) - -#define HDCP2_AUTH_DDI(port) _PORT_HDCP2_BASE(port, 0x98) +#define PORT_HDCP2_AUTH(port) _PORT_HDCP2_BASE(port, 0x98) +#define _TRANSA_HDCP2_AUTH 0x66498 +#define _TRANSB_HDCP2_AUTH 0x66598 +#define TRANS_HDCP2_AUTH(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_AUTH, \ + _TRANSB_HDCP2_AUTH) #define AUTH_LINK_AUTHENTICATED BIT(31) #define AUTH_LINK_TYPE BIT(30) #define AUTH_FORCE_CLR_INPUTCTR BIT(19) #define AUTH_CLR_KEYS BIT(18) - -#define HDCP2_CTL_DDI(port) _PORT_HDCP2_BASE(port, 0xB0) +#define HDCP2_AUTH(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP2_AUTH(trans) : \ + PORT_HDCP2_AUTH(port)) + +#define PORT_HDCP2_CTL(port) _PORT_HDCP2_BASE(port, 0xB0) +#define _TRANSA_HDCP2_CTL 0x664B0 +#define _TRANSB_HDCP2_CTL 0x665B0 +#define TRANS_HDCP2_CTL(trans) _MMIO_TRANS(trans, _TRANSA_HDCP2_CTL, \ + _TRANSB_HDCP2_CTL) #define CTL_LINK_ENCRYPTION_REQ BIT(31) - -#define HDCP2_STATUS_DDI(port) _PORT_HDCP2_BASE(port, 0xB4) -#define STREAM_ENCRYPTION_STATUS_A BIT(31) -#define STREAM_ENCRYPTION_STATUS_B BIT(30) -#define STREAM_ENCRYPTION_STATUS_C BIT(29) +#define HDCP2_CTL(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? 
\ + TRANS_HDCP2_CTL(trans) : \ + PORT_HDCP2_CTL(port)) + +#define PORT_HDCP2_STATUS(port) _PORT_HDCP2_BASE(port, 0xB4) +#define _TRANSA_HDCP2_STATUS 0x664B4 +#define _TRANSB_HDCP2_STATUS 0x665B4 +#define TRANS_HDCP2_STATUS(trans) _MMIO_TRANS(trans, \ + _TRANSA_HDCP2_STATUS, \ + _TRANSB_HDCP2_STATUS) #define LINK_TYPE_STATUS BIT(22) #define LINK_AUTH_STATUS BIT(21) #define LINK_ENCRYPTION_STATUS BIT(20) +#define HDCP2_STATUS(dev_priv, trans, port) \ + (INTEL_GEN(dev_priv) >= 12 ? \ + TRANS_HDCP2_STATUS(trans) : \ + PORT_HDCP2_STATUS(port)) /* Per-pipe DDI Function Control */ #define _TRANS_DDI_FUNC_CTL_A 0x60400 @@ -9402,6 +9671,9 @@ enum skl_power_gate { #define TRANS_DDI_EDP_INPUT_A_ONOFF (4 << 12) #define TRANS_DDI_EDP_INPUT_B_ONOFF (5 << 12) #define TRANS_DDI_EDP_INPUT_C_ONOFF (6 << 12) +#define TRANS_DDI_MST_TRANSPORT_SELECT_MASK REG_GENMASK(11, 10) +#define TRANS_DDI_MST_TRANSPORT_SELECT(trans) \ + REG_FIELD_PREP(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, trans) #define TRANS_DDI_HDCP_SIGNALLING (1 << 9) #define TRANS_DDI_DP_VC_PAYLOAD_ALLOC (1 << 8) #define TRANS_DDI_HDMI_SCRAMBLER_CTS_ENABLE (1 << 7) @@ -9429,7 +9701,9 @@ enum skl_power_gate { /* DisplayPort Transport Control */ #define _DP_TP_CTL_A 0x64040 #define _DP_TP_CTL_B 0x64140 +#define _TGL_DP_TP_CTL_A 0x60540 #define DP_TP_CTL(port) _MMIO_PORT(port, _DP_TP_CTL_A, _DP_TP_CTL_B) +#define TGL_DP_TP_CTL(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_CTL_A) #define DP_TP_CTL_ENABLE (1 << 31) #define DP_TP_CTL_FEC_ENABLE (1 << 30) #define DP_TP_CTL_MODE_SST (0 << 27) @@ -9449,7 +9723,9 @@ enum skl_power_gate { /* DisplayPort Transport Status */ #define _DP_TP_STATUS_A 0x64044 #define _DP_TP_STATUS_B 0x64144 +#define _TGL_DP_TP_STATUS_A 0x60544 #define DP_TP_STATUS(port) _MMIO_PORT(port, _DP_TP_STATUS_A, _DP_TP_STATUS_B) +#define TGL_DP_TP_STATUS(tran) _MMIO_TRANS2((tran), _TGL_DP_TP_STATUS_A) #define DP_TP_STATUS_FEC_ENABLE_LIVE (1 << 28) #define DP_TP_STATUS_IDLE_DONE (1 << 25) #define DP_TP_STATUS_ACT_SENT (1 << 24) @@ -9604,17 +9880,7 @@ enum skl_power_gate { #define _TRANSC_MSA_MISC 0x62410 #define _TRANS_EDP_MSA_MISC 0x6f410 #define TRANS_MSA_MISC(tran) _MMIO_TRANS2(tran, _TRANSA_MSA_MISC) - -#define TRANS_MSA_SYNC_CLK (1 << 0) -#define TRANS_MSA_SAMPLING_444 (2 << 1) -#define TRANS_MSA_CLRSP_YCBCR (2 << 3) -#define TRANS_MSA_6_BPC (0 << 5) -#define TRANS_MSA_8_BPC (1 << 5) -#define TRANS_MSA_10_BPC (2 << 5) -#define TRANS_MSA_12_BPC (3 << 5) -#define TRANS_MSA_16_BPC (4 << 5) -#define TRANS_MSA_CEA_RANGE (1 << 3) -#define TRANS_MSA_USE_VSC_SDP (1 << 14) +/* See DP_MSA_MISC_* for the bit definitions */ /* LCPLL Control */ #define LCPLL_CTL _MMIO(0x130040) @@ -9655,7 +9921,10 @@ enum skl_power_gate { #define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20) #define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19) #define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) +#define ICL_CDCLK_CD2X_PIPE(pipe) (_PICK(pipe, 0, 2, 6) << 19) #define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19) +#define TGL_CDCLK_CD2X_PIPE(pipe) BXT_CDCLK_CD2X_PIPE(pipe) +#define TGL_CDCLK_CD2X_PIPE_NONE ICL_CDCLK_CD2X_PIPE_NONE #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16) #define CDCLK_FREQ_DECIMAL_MASK (0x7ff) @@ -9976,6 +10245,166 @@ enum skl_power_gate { _TGL_DPLL1_CFGCR1, \ _TGL_TBTPLL_CFGCR1) +#define _DKL_PHY1_BASE 0x168000 +#define _DKL_PHY2_BASE 0x169000 +#define _DKL_PHY3_BASE 0x16A000 +#define _DKL_PHY4_BASE 0x16B000 +#define _DKL_PHY5_BASE 0x16C000 +#define _DKL_PHY6_BASE 0x16D000 + +/* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */ +#define _DKL_PLL_DIV0 
0x200 +#define DKL_PLL_DIV0_INTEG_COEFF(x) ((x) << 16) +#define DKL_PLL_DIV0_INTEG_COEFF_MASK (0x1F << 16) +#define DKL_PLL_DIV0_PROP_COEFF(x) ((x) << 12) +#define DKL_PLL_DIV0_PROP_COEFF_MASK (0xF << 12) +#define DKL_PLL_DIV0_FBPREDIV_SHIFT (8) +#define DKL_PLL_DIV0_FBPREDIV(x) ((x) << DKL_PLL_DIV0_FBPREDIV_SHIFT) +#define DKL_PLL_DIV0_FBPREDIV_MASK (0xF << DKL_PLL_DIV0_FBPREDIV_SHIFT) +#define DKL_PLL_DIV0_FBDIV_INT(x) ((x) << 0) +#define DKL_PLL_DIV0_FBDIV_INT_MASK (0xFF << 0) +#define DKL_PLL_DIV0(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_PLL_DIV0) + +#define _DKL_PLL_DIV1 0x204 +#define DKL_PLL_DIV1_IREF_TRIM(x) ((x) << 16) +#define DKL_PLL_DIV1_IREF_TRIM_MASK (0x1F << 16) +#define DKL_PLL_DIV1_TDC_TARGET_CNT(x) ((x) << 0) +#define DKL_PLL_DIV1_TDC_TARGET_CNT_MASK (0xFF << 0) +#define DKL_PLL_DIV1(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_PLL_DIV1) + +#define _DKL_PLL_SSC 0x210 +#define DKL_PLL_SSC_IREF_NDIV_RATIO(x) ((x) << 29) +#define DKL_PLL_SSC_IREF_NDIV_RATIO_MASK (0x7 << 29) +#define DKL_PLL_SSC_STEP_LEN(x) ((x) << 16) +#define DKL_PLL_SSC_STEP_LEN_MASK (0xFF << 16) +#define DKL_PLL_SSC_STEP_NUM(x) ((x) << 11) +#define DKL_PLL_SSC_STEP_NUM_MASK (0x7 << 11) +#define DKL_PLL_SSC_EN (1 << 9) +#define DKL_PLL_SSC(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_PLL_SSC) + +#define _DKL_PLL_BIAS 0x214 +#define DKL_PLL_BIAS_FRAC_EN_H (1 << 30) +#define DKL_PLL_BIAS_FBDIV_SHIFT (8) +#define DKL_PLL_BIAS_FBDIV_FRAC(x) ((x) << DKL_PLL_BIAS_FBDIV_SHIFT) +#define DKL_PLL_BIAS_FBDIV_FRAC_MASK (0x3FFFFF << DKL_PLL_BIAS_FBDIV_SHIFT) +#define DKL_PLL_BIAS(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_PLL_BIAS) + +#define _DKL_PLL_TDC_COLDST_BIAS 0x218 +#define DKL_PLL_TDC_SSC_STEP_SIZE(x) ((x) << 8) +#define DKL_PLL_TDC_SSC_STEP_SIZE_MASK (0xFF << 8) +#define DKL_PLL_TDC_FEED_FWD_GAIN(x) ((x) << 0) +#define DKL_PLL_TDC_FEED_FWD_GAIN_MASK (0xFF << 0) +#define DKL_PLL_TDC_COLDST_BIAS(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_PLL_TDC_COLDST_BIAS) + +#define _DKL_REFCLKIN_CTL 0x12C +/* Bits are the same as MG_REFCLKIN_CTL */ +#define DKL_REFCLKIN_CTL(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_REFCLKIN_CTL) + +#define _DKL_CLKTOP2_HSCLKCTL 0xD4 +/* Bits are the same as MG_CLKTOP2_HSCLKCTL */ +#define DKL_CLKTOP2_HSCLKCTL(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_CLKTOP2_HSCLKCTL) + +#define _DKL_CLKTOP2_CORECLKCTL1 0xD8 +/* Bits are the same as MG_CLKTOP2_CORECLKCTL1 */ +#define DKL_CLKTOP2_CORECLKCTL1(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_CLKTOP2_CORECLKCTL1) + +#define _DKL_TX_DPCNTL0 0x2C0 +#define DKL_TX_PRESHOOT_COEFF(x) ((x) << 13) +#define DKL_TX_PRESHOOT_COEFF_MASK (0x1f << 13) +#define DKL_TX_DE_EMPHASIS_COEFF(x) ((x) << 8) +#define DKL_TX_DE_EMPAHSIS_COEFF_MASK (0x1f << 8) +#define DKL_TX_VSWING_CONTROL(x) ((x) << 0) +#define DKL_TX_VSWING_CONTROL_MASK (0x7 << 0) +#define DKL_TX_DPCNTL0(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_DPCNTL0) + +#define _DKL_TX_DPCNTL1 0x2C4 +/* Bits are the same as DKL_TX_DPCNTRL0 */ +#define DKL_TX_DPCNTL1(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_DPCNTL1) + +#define _DKL_TX_DPCNTL2 0x2C8 +#define DKL_TX_DP20BITMODE (1 << 2) +#define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \ + 
_DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_DPCNTL2) + +#define _DKL_TX_FW_CALIB 0x2F8 +#define DKL_TX_CFG_DISABLE_WAIT_INIT (1 << 7) +#define DKL_TX_FW_CALIB(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_FW_CALIB) + +#define _DKL_TX_PMD_LANE_SUS 0xD00 +#define DKL_TX_PMD_LANE_SUS(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_PMD_LANE_SUS) + +#define _DKL_TX_DW17 0xDC4 +#define DKL_TX_DW17(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_DW17) + +#define _DKL_TX_DW18 0xDC8 +#define DKL_TX_DW18(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_TX_DW18) + +#define _DKL_DP_MODE 0xA0 +#define DKL_DP_MODE(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_DP_MODE) + +#define _DKL_CMN_UC_DW27 0x36C +#define DKL_CMN_UC_DW27_UC_HEALTH (0x1 << 15) +#define DKL_CMN_UC_DW_27(tc_port) _MMIO(_PORT(tc_port, \ + _DKL_PHY1_BASE, \ + _DKL_PHY2_BASE) + \ + _DKL_CMN_UC_DW27) + +/* + * Each Dekel PHY is addressed through a 4KB aperture. Each PHY has more than + * 4KB of register space, so a separate index is programmed in HIP_INDEX_REG0 + * or HIP_INDEX_REG1, based on the port number, to set the upper 2 address + * bits that point the 4KB window into the full PHY register space. + */ +#define _HIP_INDEX_REG0 0x1010A0 +#define _HIP_INDEX_REG1 0x1010A4 +#define HIP_INDEX_REG(tc_port) _MMIO((tc_port) < 4 ? _HIP_INDEX_REG0 \ + : _HIP_INDEX_REG1) +#define _HIP_INDEX_SHIFT(tc_port) (8 * ((tc_port) % 4)) +#define HIP_INDEX_VAL(tc_port, val) ((val) << _HIP_INDEX_SHIFT(tc_port)) + /* BXT display engine PLL */ #define BXT_DE_PLL_CTL _MMIO(0x6d000) #define BXT_DE_PLL_RATIO(x) (x) /* {60,65,100} * 19.2MHz */ @@ -9990,6 +10419,8 @@ enum skl_power_gate { /* GEN9 DC */ #define DC_STATE_EN _MMIO(0x45504) #define DC_STATE_DISABLE 0 +#define DC_STATE_EN_DC3CO REG_BIT(30) +#define DC_STATE_DC3CO_STATUS REG_BIT(29) #define DC_STATE_EN_UPTO_DC5 (1 << 0) #define DC_STATE_EN_DC9 (1 << 3) #define DC_STATE_EN_UPTO_DC6 (2 << 0) @@ -10118,11 +10549,11 @@ enum skl_power_gate { #define _PIPE_A_CSC_COEFF_BV 0x49024 #define _PIPE_A_CSC_MODE 0x49028 -#define ICL_CSC_ENABLE (1 << 31) -#define ICL_OUTPUT_CSC_ENABLE (1 << 30) -#define CSC_BLACK_SCREEN_OFFSET (1 << 2) -#define CSC_POSITION_BEFORE_GAMMA (1 << 1) -#define CSC_MODE_YUV_TO_RGB (1 << 0) +#define ICL_CSC_ENABLE (1 << 31) /* icl+ */ +#define ICL_OUTPUT_CSC_ENABLE (1 << 30) /* icl+ */ +#define CSC_BLACK_SCREEN_OFFSET (1 << 2) /* ilk/snb */ +#define CSC_POSITION_BEFORE_GAMMA (1 << 1) /* pre-glk */ +#define CSC_MODE_YUV_TO_RGB (1 << 0) /* ilk/snb */ #define _PIPE_A_CSC_PREOFF_HI 0x49030 #define _PIPE_A_CSC_PREOFF_ME 0x49034 @@ -10238,6 +10669,9 @@ enum skl_power_gate { #define _PAL_PREC_GC_MAX_A 0x4A410 #define _PAL_PREC_GC_MAX_B 0x4AC10 #define _PAL_PREC_GC_MAX_C 0x4B410 +#define PREC_PAL_DATA_RED_MASK REG_GENMASK(29, 20) +#define PREC_PAL_DATA_GREEN_MASK REG_GENMASK(19, 10) +#define PREC_PAL_DATA_BLUE_MASK REG_GENMASK(9, 0) #define _PAL_PREC_EXT_GC_MAX_A 0x4A420 #define _PAL_PREC_EXT_GC_MAX_B 0x4AC20 #define _PAL_PREC_EXT_GC_MAX_C 0x4B420 @@ -10290,6 +10724,9 @@ enum skl_power_gate { #define CGM_PIPE_MODE_GAMMA (1 << 2) #define CGM_PIPE_MODE_CSC (1 << 1) #define CGM_PIPE_MODE_DEGAMMA (1 << 0) +#define CGM_PIPE_GAMMA_RED_MASK REG_GENMASK(9, 0) +#define CGM_PIPE_GAMMA_GREEN_MASK REG_GENMASK(25, 16) +#define CGM_PIPE_GAMMA_BLUE_MASK REG_GENMASK(9, 0) #define _CGM_PIPE_B_CSC_COEFF01 (VLV_DISPLAY_BASE + 0x69900) 
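The Dekel PHY comment a few lines up describes the indirection behind the new DKL_* macros: each PHY exposes only a 4KB aperture, and HIP_INDEX_REG0/1 select which page of the PHY's larger register space that aperture shows. The fragment below is an editor's sketch of the program-the-index-then-access pattern, not part of the patch; the function, the page value 0x2 and the absence of locking are assumptions for illustration, and only the macros themselves come from the definitions above.

/* Editor's sketch, not part of the patch: windowed access to a Dekel PHY. */
static u32 example_read_dkl_pll_div0(struct drm_i915_private *dev_priv,
				     enum tc_port tc_port)
{
	/*
	 * Point this PHY's 4KB aperture at the page holding the PLL
	 * registers before touching any aperture-relative DKL_* offset.
	 * The page value 0x2 is assumed here for illustration.
	 */
	I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));

	/* The aperture-relative offset _DKL_PLL_DIV0 is now visible. */
	return I915_READ(DKL_PLL_DIV0(tc_port));
}

Since each HIP_INDEX register carries the index fields for four ports, a real user would likely prefer a read-modify-write (or an outer lock) so that concurrent accesses to sibling PHYs do not clobber each other's window selection.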
#define _CGM_PIPE_B_CSC_COEFF23 (VLV_DISPLAY_BASE + 0x69904) @@ -11537,16 +11974,31 @@ enum skl_power_gate { #define PORT_TX_DFLEXDPSP(fia) _MMIO_FIA((fia), 0x008A0) #define MODULAR_FIA_MASK (1 << 4) -#define TC_LIVE_STATE_TBT(tc_port) (1 << ((tc_port) * 8 + 6)) -#define TC_LIVE_STATE_TC(tc_port) (1 << ((tc_port) * 8 + 5)) -#define DP_LANE_ASSIGNMENT_SHIFT(tc_port) ((tc_port) * 8) -#define DP_LANE_ASSIGNMENT_MASK(tc_port) (0xf << ((tc_port) * 8)) -#define DP_LANE_ASSIGNMENT(tc_port, x) ((x) << ((tc_port) * 8)) +#define TC_LIVE_STATE_TBT(idx) (1 << ((idx) * 8 + 6)) +#define TC_LIVE_STATE_TC(idx) (1 << ((idx) * 8 + 5)) +#define DP_LANE_ASSIGNMENT_SHIFT(idx) ((idx) * 8) +#define DP_LANE_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 8)) +#define DP_LANE_ASSIGNMENT(idx, x) ((x) << ((idx) * 8)) #define PORT_TX_DFLEXDPPMS(fia) _MMIO_FIA((fia), 0x00890) -#define DP_PHY_MODE_STATUS_COMPLETED(tc_port) (1 << (tc_port)) +#define DP_PHY_MODE_STATUS_COMPLETED(idx) (1 << (idx)) #define PORT_TX_DFLEXDPCSSS(fia) _MMIO_FIA((fia), 0x00894) -#define DP_PHY_MODE_STATUS_NOT_SAFE(tc_port) (1 << (tc_port)) +#define DP_PHY_MODE_STATUS_NOT_SAFE(idx) (1 << (idx)) + +#define PORT_TX_DFLEXPA1(fia) _MMIO_FIA((fia), 0x00880) +#define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4) +#define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4)) +#define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4)) + +/* This register controls the Display State Buffer (DSB) engines. */ +#define _DSBSL_INSTANCE_BASE 0x70B00 +#define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \ + (pipe) * 0x1000 + (id) * 100) +#define DSB_HEAD(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x0) +#define DSB_TAIL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x4) +#define DSB_CTRL(pipe, id) _MMIO(DSBSL_INSTANCE(pipe, id) + 0x8) +#define DSB_ENABLE (1 << 31) +#define DSB_STATUS (1 << 0) #endif /* _I915_REG_H_ */ diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index bc828a9ace84..bbd71af00a91 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -31,6 +31,8 @@ #include "gem/i915_gem_context.h" #include "gt/intel_context.h" +#include "gt/intel_ring.h" +#include "gt/intel_rps.h" #include "i915_active.h" #include "i915_drv.h" @@ -169,16 +171,17 @@ remove_from_client(struct i915_request *request) { struct drm_i915_file_private *file_priv; - file_priv = READ_ONCE(request->file_priv); - if (!file_priv) + if (!READ_ONCE(request->file_priv)) return; - spin_lock(&file_priv->mm.lock); - if (request->file_priv) { + rcu_read_lock(); + file_priv = xchg(&request->file_priv, NULL); + if (file_priv) { + spin_lock(&file_priv->mm.lock); list_del(&request->client_link); - request->file_priv = NULL; + spin_unlock(&file_priv->mm.lock); } - spin_unlock(&file_priv->mm.lock); + rcu_read_unlock(); } static void free_capture_list(struct i915_request *request) @@ -205,21 +208,18 @@ static void remove_from_engine(struct i915_request *rq) * check that the rq still belongs to the newly locked engine. 
*/ locked = READ_ONCE(rq->engine); - spin_lock(&locked->active.lock); + spin_lock_irq(&locked->active.lock); while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { spin_unlock(&locked->active.lock); spin_lock(&engine->active.lock); locked = engine; } list_del(&rq->sched.link); - spin_unlock(&locked->active.lock); + spin_unlock_irq(&locked->active.lock); } -static bool i915_request_retire(struct i915_request *rq) +bool i915_request_retire(struct i915_request *rq) { - struct i915_active_request *active, *next; - - lockdep_assert_held(&rq->timeline->mutex); if (!i915_request_completed(rq)) return false; @@ -240,41 +240,11 @@ static bool i915_request_retire(struct i915_request *rq) * Note this requires that we are always called in request * completion order. */ - GEM_BUG_ON(!list_is_first(&rq->link, &rq->timeline->requests)); + GEM_BUG_ON(!list_is_first(&rq->link, + &i915_request_timeline(rq)->requests)); rq->ring->head = rq->postfix; /* - * Walk through the active list, calling retire on each. This allows - * objects to track their GPU activity and mark themselves as idle - * when their *last* active request is completed (updating state - * tracking lists for eviction, active references for GEM, etc). - * - * As the ->retire() may free the node, we decouple it first and - * pass along the auxiliary information (to avoid dereferencing - * the node after the callback). - */ - list_for_each_entry_safe(active, next, &rq->active_list, link) { - /* - * In microbenchmarks or focusing upon time inside the kernel, - * we may spend an inordinate amount of time simply handling - * the retirement of requests and processing their callbacks. - * Of which, this loop itself is particularly hot due to the - * cache misses when jumping around the list of - * i915_active_request. So we try to keep this loop as - * streamlined as possible and also prefetch the next - * i915_active_request to try and hide the likely cache miss. 
- */ - prefetchw(next); - - INIT_LIST_HEAD(&active->link); - RCU_INIT_POINTER(active->request, NULL); - - active->retire(active, rq); - } - - local_irq_disable(); - - /* * We only loosely track inflight requests across preemption, * and so we may find ourselves attempting to retire a _completed_ * request that we have removed from the HW and put back on a run @@ -282,24 +252,22 @@ static bool i915_request_retire(struct i915_request *rq) */ remove_from_engine(rq); - spin_lock(&rq->lock); + spin_lock_irq(&rq->lock); i915_request_mark_complete(rq); if (!i915_request_signaled(rq)) dma_fence_signal_locked(&rq->fence); if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags)) i915_request_cancel_breadcrumb(rq); if (i915_request_has_waitboost(rq)) { - GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters)); - atomic_dec(&rq->i915->gt_pm.rps.num_waiters); + GEM_BUG_ON(!atomic_read(&rq->engine->gt->rps.num_waiters)); + atomic_dec(&rq->engine->gt->rps.num_waiters); } if (!test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags)) { set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); __notify_execute_cb(rq); } GEM_BUG_ON(!list_empty(&rq->execute_cb)); - spin_unlock(&rq->lock); - - local_irq_enable(); + spin_unlock_irq(&rq->lock); remove_from_client(rq); list_del(&rq->link); @@ -316,7 +284,7 @@ static bool i915_request_retire(struct i915_request *rq) void i915_request_retire_upto(struct i915_request *rq) { - struct intel_timeline * const tl = rq->timeline; + struct intel_timeline * const tl = i915_request_timeline(rq); struct i915_request *tmp; GEM_TRACE("%s fence %llx:%lld, current %d\n", @@ -324,7 +292,6 @@ void i915_request_retire_upto(struct i915_request *rq) rq->fence.context, rq->fence.seqno, hwsp_seqno(rq)); - lockdep_assert_held(&tl->mutex); GEM_BUG_ON(!i915_request_completed(rq)); do { @@ -680,9 +647,12 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) rq->gem_context = ce->gem_context; rq->engine = ce->engine; rq->ring = ce->ring; - rq->timeline = tl; + rq->execution_mask = ce->engine->mask; + + rcu_assign_pointer(rq->timeline, tl); rq->hwsp_seqno = tl->hwsp_seqno; rq->hwsp_cacheline = tl->hwsp_cacheline; + rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */ spin_lock_init(&rq->lock); @@ -700,9 +670,7 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp) rq->batch = NULL; rq->capture_list = NULL; rq->flags = 0; - rq->execution_mask = ALL_ENGINES; - INIT_LIST_HEAD(&rq->active_list); INIT_LIST_HEAD(&rq->execute_cb); /* @@ -741,7 +709,6 @@ err_unwind: ce->ring->emit = rq->head; /* Make sure we didn't add ourselves to external state before freeing */ - GEM_BUG_ON(!list_empty(&rq->active_list)); GEM_BUG_ON(!list_empty(&rq->sched.signalers_list)); GEM_BUG_ON(!list_empty(&rq->sched.waiters_list)); @@ -786,16 +753,43 @@ err_unlock: static int i915_request_await_start(struct i915_request *rq, struct i915_request *signal) { - if (list_is_first(&signal->link, &signal->timeline->requests)) - return 0; + struct intel_timeline *tl; + struct dma_fence *fence; + int err; - signal = list_prev_entry(signal, link); - if (intel_timeline_sync_is_later(rq->timeline, &signal->fence)) + GEM_BUG_ON(i915_request_timeline(rq) == + rcu_access_pointer(signal->timeline)); + + rcu_read_lock(); + tl = rcu_dereference(signal->timeline); + if (i915_request_started(signal) || !kref_get_unless_zero(&tl->kref)) + tl = NULL; + rcu_read_unlock(); + if (!tl) /* already started or maybe even completed */ return 0; - return i915_sw_fence_await_dma_fence(&rq->submit, - &signal->fence, 0, - 
I915_FENCE_GFP); + fence = ERR_PTR(-EBUSY); + if (mutex_trylock(&tl->mutex)) { + fence = NULL; + if (!i915_request_started(signal) && + !list_is_first(&signal->link, &tl->requests)) { + signal = list_prev_entry(signal, link); + fence = dma_fence_get(&signal->fence); + } + mutex_unlock(&tl->mutex); + } + intel_timeline_put(tl); + if (IS_ERR_OR_NULL(fence)) + return PTR_ERR_OR_ZERO(fence); + + err = 0; + if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence)) + err = i915_sw_fence_await_dma_fence(&rq->submit, + fence, 0, + I915_FENCE_GFP); + dma_fence_put(fence); + + return err; } static intel_engine_mask_t @@ -821,34 +815,33 @@ emit_semaphore_wait(struct i915_request *to, struct i915_request *from, gfp_t gfp) { + const int has_token = INTEL_GEN(to->i915) >= 12; u32 hwsp_offset; + int len; u32 *cs; - int err; - GEM_BUG_ON(!from->timeline->has_initial_breadcrumb); GEM_BUG_ON(INTEL_GEN(to->i915) < 8); /* Just emit the first semaphore we see as request space is limited. */ if (already_busywaiting(to) & from->engine->mask) - return i915_sw_fence_await_dma_fence(&to->submit, - &from->fence, 0, - I915_FENCE_GFP); + goto await_fence; - err = i915_request_await_start(to, from); - if (err < 0) - return err; + if (i915_request_await_start(to, from) < 0) + goto await_fence; /* Only submit our spinner after the signaler is running! */ - err = __i915_request_await_execution(to, from, NULL, gfp); - if (err) - return err; + if (__i915_request_await_execution(to, from, NULL, gfp)) + goto await_fence; /* We need to pin the signaler's HWSP until we are finished reading. */ - err = intel_timeline_read_hwsp(from, to, &hwsp_offset); - if (err) - return err; + if (intel_timeline_read_hwsp(from, to, &hwsp_offset)) + goto await_fence; + + len = 4; + if (has_token) + len += 2; - cs = intel_ring_begin(to, 4); + cs = intel_ring_begin(to, len); if (IS_ERR(cs)) return PTR_ERR(cs); @@ -860,18 +853,28 @@ emit_semaphore_wait(struct i915_request *to, * (post-wrap) values than they were expecting (and so wait * forever). */ - *cs++ = MI_SEMAPHORE_WAIT | - MI_SEMAPHORE_GLOBAL_GTT | - MI_SEMAPHORE_POLL | - MI_SEMAPHORE_SAD_GTE_SDD; + *cs++ = (MI_SEMAPHORE_WAIT | + MI_SEMAPHORE_GLOBAL_GTT | + MI_SEMAPHORE_POLL | + MI_SEMAPHORE_SAD_GTE_SDD) + + has_token; *cs++ = from->fence.seqno; *cs++ = hwsp_offset; *cs++ = 0; + if (has_token) { + *cs++ = 0; + *cs++ = MI_NOOP; + } intel_ring_advance(to, cs); to->sched.semaphores |= from->engine->mask; to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN; return 0; + +await_fence: + return i915_sw_fence_await_dma_fence(&to->submit, + &from->fence, 0, + I915_FENCE_GFP); } static int @@ -955,21 +958,23 @@ i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) /* Squash repeated waits to the same timelines */ if (fence->context && - intel_timeline_sync_is_later(rq->timeline, fence)) + intel_timeline_sync_is_later(i915_request_timeline(rq), + fence)) continue; if (dma_fence_is_i915(fence)) ret = i915_request_await_request(rq, to_request(fence)); else ret = i915_sw_fence_await_dma_fence(&rq->submit, fence, - I915_FENCE_TIMEOUT, + fence->context ? 
I915_FENCE_TIMEOUT : 0, I915_FENCE_GFP); if (ret < 0) return ret; /* Record the latest fence used against each timeline */ if (fence->context) - intel_timeline_sync_set(rq->timeline, fence); + intel_timeline_sync_set(i915_request_timeline(rq), + fence); } while (--nchild); return 0; @@ -1111,7 +1116,7 @@ void i915_request_skip(struct i915_request *rq, int error) static struct i915_request * __i915_request_add_to_timeline(struct i915_request *rq) { - struct intel_timeline *timeline = rq->timeline; + struct intel_timeline *timeline = i915_request_timeline(rq); struct i915_request *prev; /* @@ -1134,8 +1139,8 @@ __i915_request_add_to_timeline(struct i915_request *rq) * precludes optimising to use semaphores serialisation of a single * timeline across engines. */ - prev = rcu_dereference_protected(timeline->last_request.request, - lockdep_is_held(&timeline->mutex)); + prev = to_request(__i915_active_fence_set(&timeline->last_request, + &rq->fence)); if (prev && !i915_request_completed(prev)) { if (is_power_of_2(prev->engine->mask | rq->engine->mask)) i915_sw_fence_await_sw_fence(&rq->submit, @@ -1160,7 +1165,6 @@ __i915_request_add_to_timeline(struct i915_request *rq) * us, the timeline will hold its seqno which is later than ours. */ GEM_BUG_ON(timeline->seqno != rq->fence.seqno); - __i915_active_request_set(&timeline->last_request, rq); return prev; } @@ -1224,7 +1228,7 @@ void __i915_request_queue(struct i915_request *rq, void i915_request_add(struct i915_request *rq) { struct i915_sched_attr attr = rq->gem_context->sched; - struct intel_timeline * const tl = rq->timeline; + struct intel_timeline * const tl = i915_request_timeline(rq); struct i915_request *prev; lockdep_assert_held(&tl->mutex); @@ -1279,7 +1283,9 @@ void i915_request_add(struct i915_request *rq) * work on behalf of others -- but instead we should benefit from * improved resource management. (Well, that's the theory at least.) */ - if (prev && i915_request_completed(prev) && prev->timeline == tl) + if (prev && + i915_request_completed(prev) && + rcu_access_pointer(prev->timeline) == tl) i915_request_retire_upto(prev); mutex_unlock(&tl->mutex); @@ -1442,7 +1448,7 @@ long i915_request_wait(struct i915_request *rq, * completion. That requires having a good predictor for the request * duration, which we currently lack. 
*/ - if (CONFIG_DRM_I915_SPIN_REQUEST && + if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) && __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) { dma_fence_signal(&rq->fence); goto out; @@ -1462,7 +1468,7 @@ long i915_request_wait(struct i915_request *rq, */ if (flags & I915_WAIT_PRIORITY) { if (!i915_request_started(rq) && INTEL_GEN(rq->i915) >= 6) - gen6_rps_boost(rq); + intel_rps_boost(rq); i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT); } @@ -1488,6 +1494,7 @@ long i915_request_wait(struct i915_request *rq, break; } + intel_engine_flush_submission(rq->engine); timeout = io_schedule_timeout(timeout); } __set_current_state(TASK_RUNNING); @@ -1500,48 +1507,6 @@ out: return timeout; } -bool i915_retire_requests(struct drm_i915_private *i915) -{ - struct intel_gt_timelines *timelines = &i915->gt.timelines; - struct intel_timeline *tl, *tn; - unsigned long flags; - LIST_HEAD(free); - - spin_lock_irqsave(&timelines->lock, flags); - list_for_each_entry_safe(tl, tn, &timelines->active_list, link) { - if (!mutex_trylock(&tl->mutex)) - continue; - - intel_timeline_get(tl); - GEM_BUG_ON(!tl->active_count); - tl->active_count++; /* pin the list element */ - spin_unlock_irqrestore(&timelines->lock, flags); - - retire_requests(tl); - - spin_lock_irqsave(&timelines->lock, flags); - - /* Resume iteration after dropping lock */ - list_safe_reset_next(tl, tn, link); - if (!--tl->active_count) - list_del(&tl->link); - - mutex_unlock(&tl->mutex); - - /* Defer the final release to after the spinlock */ - if (refcount_dec_and_test(&tl->kref.refcount)) { - GEM_BUG_ON(tl->active_count); - list_add(&tl->link, &free); - } - } - spin_unlock_irqrestore(&timelines->lock, flags); - - list_for_each_entry_safe(tl, tn, &free, link) - __intel_timeline_free(&tl->kref); - - return !list_empty(&timelines->active_list); -} - #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) #include "selftests/mock_request.c" #include "selftests/i915_request.c" diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index e4dd013761e8..96991d64759c 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -113,7 +113,7 @@ struct i915_request { struct intel_engine_cs *engine; struct intel_context *hw_context; struct intel_ring *ring; - struct intel_timeline *timeline; + struct intel_timeline __rcu *timeline; struct list_head signal_link; /* @@ -211,14 +211,14 @@ struct i915_request { * on the active_list (of their final request). */ struct i915_capture_list *capture_list; - struct list_head active_list; /** Time at which this request was emitted, in jiffies. 
*/ unsigned long emitted_jiffies; unsigned long flags; -#define I915_REQUEST_WAITBOOST BIT(0) -#define I915_REQUEST_NOPREEMPT BIT(1) +#define I915_REQUEST_WAITBOOST BIT(0) +#define I915_REQUEST_NOPREEMPT BIT(1) +#define I915_REQUEST_SENTINEL BIT(2) /** timeline->request entry for this request */ struct list_head link; @@ -251,6 +251,7 @@ struct i915_request *__i915_request_commit(struct i915_request *request); void __i915_request_queue(struct i915_request *rq, const struct i915_sched_attr *attr); +bool i915_request_retire(struct i915_request *rq); void i915_request_retire_upto(struct i915_request *rq); static inline struct i915_request * @@ -309,10 +310,8 @@ long i915_request_wait(struct i915_request *rq, long timeout) __attribute__((nonnull(1))); #define I915_WAIT_INTERRUPTIBLE BIT(0) -#define I915_WAIT_LOCKED BIT(1) /* struct_mutex held, handle GPU reset */ -#define I915_WAIT_PRIORITY BIT(2) /* small priority bump for the request */ -#define I915_WAIT_ALL BIT(3) /* used by i915_gem_object_wait() */ -#define I915_WAIT_FOR_IDLE_BOOST BIT(4) +#define I915_WAIT_PRIORITY BIT(1) /* small priority bump for the request */ +#define I915_WAIT_ALL BIT(2) /* used by i915_gem_object_wait() */ static inline bool i915_request_signaled(const struct i915_request *rq) { @@ -442,6 +441,29 @@ static inline bool i915_request_has_nopreempt(const struct i915_request *rq) return unlikely(rq->flags & I915_REQUEST_NOPREEMPT); } -bool i915_retire_requests(struct drm_i915_private *i915); +static inline bool i915_request_has_sentinel(const struct i915_request *rq) +{ + return unlikely(rq->flags & I915_REQUEST_SENTINEL); +} + +static inline struct intel_timeline * +i915_request_timeline(struct i915_request *rq) +{ + /* Valid only while the request is being constructed (or retired). */ + return rcu_dereference_protected(rq->timeline, + lockdep_is_held(&rcu_access_pointer(rq->timeline)->mutex)); +} + +static inline struct intel_timeline * +i915_request_active_timeline(struct i915_request *rq) +{ + /* + * When in use during submission, we are protected by a guarantee that + * the context/timeline is pinned and must remain pinned until after + * this submission. + */ + return rcu_dereference_protected(rq->timeline, + lockdep_is_held(&rq->engine->active.lock)); +} #endif /* I915_REQUEST_H */ diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index 6617963df9ed..b7b59328cb76 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -67,15 +67,15 @@ static inline struct scatterlist *__sg_next(struct scatterlist *sg) } /** - * __for_each_sgt_dma - iterate over the DMA addresses of the given sg_table - * @__dmap: DMA address (output) + * __for_each_sgt_daddr - iterate over the device addresses of the given sg_table + * @__dp: Device address (output) * @__iter: 'struct sgt_iter' (iterator state, internal) * @__sgt: sg_table to iterate over (input) * @__step: step size */ -#define __for_each_sgt_dma(__dmap, __iter, __sgt, __step) \ +#define __for_each_sgt_daddr(__dp, __iter, __sgt, __step) \ for ((__iter) = __sgt_iter((__sgt)->sgl, true); \ - ((__dmap) = (__iter).dma + (__iter).curr); \ + ((__dp) = (__iter).dma + (__iter).curr), (__iter).sgp; \ (((__iter).curr += (__step)) >= (__iter).max) ? 
\ (__iter) = __sgt_iter(__sg_next((__iter).sgp), true), 0 : 0) diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c index 3eba8a2b39c2..010d67f48ad9 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.c +++ b/drivers/gpu/drm/i915/i915_scheduler.c @@ -211,10 +211,7 @@ static void kick_submission(struct intel_engine_cs *engine, /* * If we are already the currently executing context, don't - * bother evaluating if we should preempt ourselves, or if - * we expect nothing to change as a result of running the - * tasklet, i.e. we have not change the priority queue - * sufficiently to oust the running context. + * bother evaluating if we should preempt ourselves. */ if (inflight->hw_context == rq->hw_context) goto unlock; diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h index 7eefccff39bf..07d243acf553 100644 --- a/drivers/gpu/drm/i915/i915_scheduler.h +++ b/drivers/gpu/drm/i915/i915_scheduler.h @@ -52,22 +52,4 @@ static inline void i915_priolist_free(struct i915_priolist *p) __i915_priolist_free(p); } -static inline bool i915_scheduler_need_preempt(int prio, int active) -{ - /* - * Allow preemption of low -> normal -> high, but we do - * not allow low priority tasks to preempt other low priority - * tasks under the impression that latency for low priority - * tasks does not matter (as much as background throughput), - * so kiss. - * - * More naturally we would write - * prio >= max(0, last); - * except that we wish to prevent triggering preemption at the same - * priority level: the task that is running should remain running - * to preserve FIFO ordering of dependencies. - */ - return prio > max(I915_PRIORITY_NORMAL - 1, active); -} - #endif /* _I915_SCHEDULER_H_ */ diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h index aad81acba9dc..d18e70550054 100644 --- a/drivers/gpu/drm/i915/i915_scheduler_types.h +++ b/drivers/gpu/drm/i915/i915_scheduler_types.h @@ -49,6 +49,15 @@ struct i915_sched_attr { * DAG of each request, we are able to insert it into a sorted queue when it * is ready, and are able to reorder its portion of the graph to accommodate * dynamic priority changes. + * + * Ok, there is now one active element to the "scheduler" in the backends. + * We let a new context run for a small amount of time before re-evaluating + * the run order. As we re-evaluate, we maintain the strict ordering of + * dependencies, but attempt to rotate the active contexts (the current context + * is put to the back of its priority queue, then reshuffling its dependents). + * This provides minimal timeslicing and prevents a userspace hog (e.g. + * something waiting on a user semaphore [VkEvent]) from denying service to + * others. 
*/ struct i915_sched_node { struct list_head signalers_list; /* those before us, we depend upon */ diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index 8508a01ad8b9..8812cdd9007f 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -28,6 +28,7 @@ #include "display/intel_fbc.h" #include "display/intel_gmbus.h" +#include "display/intel_vga.h" #include "i915_drv.h" #include "i915_reg.h" @@ -57,7 +58,7 @@ static void i915_restore_display(struct drm_i915_private *dev_priv) if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) <= 4 && !IS_G4X(dev_priv)) I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL); - i915_redisable_vga(dev_priv); + intel_vga_redisable(dev_priv); } int i915_save_state(struct drm_i915_private *dev_priv) @@ -65,8 +66,6 @@ int i915_save_state(struct drm_i915_private *dev_priv) struct pci_dev *pdev = dev_priv->drm.pdev; int i; - mutex_lock(&dev_priv->drm.struct_mutex); - i915_save_display(dev_priv); if (IS_GEN(dev_priv, 4)) @@ -100,8 +99,6 @@ int i915_save_state(struct drm_i915_private *dev_priv) dev_priv->regfile.saveSWF3[i] = I915_READ(SWF3(i)); } - mutex_unlock(&dev_priv->drm.struct_mutex); - return 0; } @@ -110,8 +107,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv) struct pci_dev *pdev = dev_priv->drm.pdev; int i; - mutex_lock(&dev_priv->drm.struct_mutex); - if (IS_GEN(dev_priv, 4)) pci_write_config_word(pdev, GCDGMBUS, dev_priv->regfile.saveGCDGMBUS); @@ -145,8 +140,6 @@ int i915_restore_state(struct drm_i915_private *dev_priv) I915_WRITE(SWF3(i), dev_priv->regfile.saveSWF3[i]); } - mutex_unlock(&dev_priv->drm.struct_mutex); - intel_gmbus_reset(dev_priv); return 0; diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c new file mode 100644 index 000000000000..39c79e1c5b52 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_switcheroo.c @@ -0,0 +1,67 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include <linux/vga_switcheroo.h> + +#include "i915_drv.h" +#include "i915_switcheroo.h" + +static void i915_switcheroo_set_state(struct pci_dev *pdev, + enum vga_switcheroo_state state) +{ + struct drm_i915_private *i915 = pdev_to_i915(pdev); + pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; + + if (!i915) { + dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n"); + return; + } + + if (state == VGA_SWITCHEROO_ON) { + pr_info("switched on\n"); + i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; + /* i915 resume handler doesn't set to D0 */ + pci_set_power_state(pdev, PCI_D0); + i915_resume_switcheroo(i915); + i915->drm.switch_power_state = DRM_SWITCH_POWER_ON; + } else { + pr_info("switched off\n"); + i915->drm.switch_power_state = DRM_SWITCH_POWER_CHANGING; + i915_suspend_switcheroo(i915, pmm); + i915->drm.switch_power_state = DRM_SWITCH_POWER_OFF; + } +} + +static bool i915_switcheroo_can_switch(struct pci_dev *pdev) +{ + struct drm_i915_private *i915 = pdev_to_i915(pdev); + + /* + * FIXME: open_count is protected by drm_global_mutex but that would lead to + * locking inversion with the driver load path. And the access here is + * completely racy anyway. So don't bother with locking for now. 
+ */ + return i915 && i915->drm.open_count == 0; +} + +static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { + .set_gpu_state = i915_switcheroo_set_state, + .reprobe = NULL, + .can_switch = i915_switcheroo_can_switch, +}; + +int i915_switcheroo_register(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = i915->drm.pdev; + + return vga_switcheroo_register_client(pdev, &i915_switcheroo_ops, false); +} + +void i915_switcheroo_unregister(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = i915->drm.pdev; + + vga_switcheroo_unregister_client(pdev); +} diff --git a/drivers/gpu/drm/i915/i915_switcheroo.h b/drivers/gpu/drm/i915/i915_switcheroo.h new file mode 100644 index 000000000000..59b6c1e07d75 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_switcheroo.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __I915_SWITCHEROO__ +#define __I915_SWITCHEROO__ + +struct drm_i915_private; + +int i915_switcheroo_register(struct drm_i915_private *i915); +void i915_switcheroo_unregister(struct drm_i915_private *i915); + +#endif /* __I915_SWITCHEROO__ */ diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index d8a3b180c084..65476909d1bf 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -30,6 +30,9 @@ #include <linux/stat.h> #include <linux/sysfs.h> +#include "gt/intel_rc6.h" +#include "gt/intel_rps.h" + #include "i915_drv.h" #include "i915_sysfs.h" #include "intel_pm.h" @@ -49,7 +52,7 @@ static u32 calc_residency(struct drm_i915_private *dev_priv, u64 res = 0; with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) - res = intel_rc6_residency_us(dev_priv, reg); + res = intel_rc6_residency_us(&dev_priv->gt.rc6, reg); return DIV_ROUND_CLOSEST_ULL(res, 1000); } @@ -142,12 +145,12 @@ static const struct attribute_group media_rc6_attr_group = { }; #endif -static int l3_access_valid(struct drm_i915_private *dev_priv, loff_t offset) +static int l3_access_valid(struct drm_i915_private *i915, loff_t offset) { - if (!HAS_L3_DPF(dev_priv)) + if (!HAS_L3_DPF(i915)) return -EPERM; - if (offset % 4 != 0) + if (!IS_ALIGNED(offset, sizeof(u32))) return -EINVAL; if (offset >= GEN7_L3LOG_SIZE) @@ -162,31 +165,24 @@ i915_l3_read(struct file *filp, struct kobject *kobj, loff_t offset, size_t count) { struct device *kdev = kobj_to_dev(kobj); - struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct drm_device *dev = &dev_priv->drm; + struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); int slice = (int)(uintptr_t)attr->private; int ret; - count = round_down(count, 4); - - ret = l3_access_valid(dev_priv, offset); + ret = l3_access_valid(i915, offset); if (ret) return ret; + count = round_down(count, sizeof(u32)); count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count); + memset(buf, 0, count); - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; - - if (dev_priv->l3_parity.remap_info[slice]) + spin_lock(&i915->gem.contexts.lock); + if (i915->l3_parity.remap_info[slice]) memcpy(buf, - dev_priv->l3_parity.remap_info[slice] + (offset/4), + i915->l3_parity.remap_info[slice] + offset / sizeof(u32), count); - else - memset(buf, 0, count); - - mutex_unlock(&dev->struct_mutex); + spin_unlock(&i915->gem.contexts.lock); return count; } @@ -197,46 +193,49 @@ i915_l3_write(struct file *filp, struct kobject *kobj, loff_t offset, size_t count) { struct device *kdev = kobj_to_dev(kobj); - struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - 
struct drm_device *dev = &dev_priv->drm; - struct i915_gem_context *ctx; + struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); int slice = (int)(uintptr_t)attr->private; - u32 **remap_info; + u32 *remap_info, *freeme = NULL; + struct i915_gem_context *ctx; int ret; - ret = l3_access_valid(dev_priv, offset); + ret = l3_access_valid(i915, offset); if (ret) return ret; - ret = i915_mutex_lock_interruptible(dev); - if (ret) - return ret; + if (count < sizeof(u32)) + return -EINVAL; - remap_info = &dev_priv->l3_parity.remap_info[slice]; - if (!*remap_info) { - *remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); - if (!*remap_info) { - ret = -ENOMEM; - goto out; - } + remap_info = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL); + if (!remap_info) + return -ENOMEM; + + spin_lock(&i915->gem.contexts.lock); + + if (i915->l3_parity.remap_info[slice]) { + freeme = remap_info; + remap_info = i915->l3_parity.remap_info[slice]; + } else { + i915->l3_parity.remap_info[slice] = remap_info; } - /* TODO: Ideally we really want a GPU reset here to make sure errors - * aren't propagated. Since I cannot find a stable way to reset the GPU - * at this point it is left as a TODO. - */ - memcpy(*remap_info + (offset/4), buf, count); + count = round_down(count, sizeof(u32)); + memcpy(remap_info + offset / sizeof(u32), buf, count); /* NB: We defer the remapping until we switch to the context */ - list_for_each_entry(ctx, &dev_priv->contexts.list, link) - ctx->remap_slice |= (1<<slice); + list_for_each_entry(ctx, &i915->gem.contexts.list, link) + ctx->remap_slice |= BIT(slice); - ret = count; + spin_unlock(&i915->gem.contexts.lock); + kfree(freeme); -out: - mutex_unlock(&dev->struct_mutex); + /* + * TODO: Ideally we really want a GPU reset here to make sure errors + * aren't propagated. Since I cannot find a stable way to reset the GPU + * at this point it is left as a TODO. 
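The i915_l3_write() rework above is also a clear example of moving an allocation out from under a lock: the replacement remap buffer is allocated before the contexts spinlock is taken, and if another writer got there first the fresh buffer is parked in a local (freeme) and released only after the lock is dropped. A userspace C sketch of that allocate-outside, install-or-discard-under-the-lock shape (write_slot(), SLOT_SIZE and the globals are hypothetical):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#define SLOT_SIZE 128

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char *slot;			/* protected by 'lock' */

static int write_slot(const void *buf, size_t count, size_t offset)
{
	char *fresh, *freeme = NULL, *dst;

	if (offset + count > SLOT_SIZE)
		return -EINVAL;

	fresh = calloc(1, SLOT_SIZE);	/* may fail or block, so done unlocked */
	if (!fresh)
		return -ENOMEM;

	pthread_mutex_lock(&lock);
	if (slot) {
		freeme = fresh;		/* lost the race: keep the existing buffer */
		dst = slot;
	} else {
		slot = fresh;		/* first writer installs the allocation */
		dst = fresh;
	}
	memcpy(dst + offset, buf, count);
	pthread_mutex_unlock(&lock);

	free(freeme);			/* never free under the lock; NULL is fine */
	return 0;
}

The vma pin path later in this section uses the same idea: i915_vma_work() is allocated before vm->mutex, since no allocations are allowed once that lock is held.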
+ */ - return ret; + return count; } static const struct bin_attribute dpf_attrs = { @@ -261,6 +260,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + struct intel_rps *rps = &dev_priv->gt.rps; intel_wakeref_t wakeref; u32 freq; @@ -273,31 +273,31 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, freq = (freq >> 8) & 0xff; } else { - freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1)); + freq = intel_get_cagf(rps, I915_READ(GEN6_RPSTAT1)); } intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); - return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq)); + return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(rps, freq)); } static ssize_t gt_cur_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + struct intel_rps *rps = &dev_priv->gt.rps; return snprintf(buf, PAGE_SIZE, "%d\n", - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.cur_freq)); + intel_gpu_freq(rps, rps->cur_freq)); } static ssize_t gt_boost_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + struct intel_rps *rps = &dev_priv->gt.rps; return snprintf(buf, PAGE_SIZE, "%d\n", - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.boost_freq)); + intel_gpu_freq(rps, rps->boost_freq)); } static ssize_t gt_boost_freq_mhz_store(struct device *kdev, @@ -305,7 +305,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, const char *buf, size_t count) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = &dev_priv->gt.rps; bool boost = false; ssize_t ret; u32 val; @@ -315,7 +315,7 @@ static ssize_t gt_boost_freq_mhz_store(struct device *kdev, return ret; /* Validate against (static) hardware limits */ - val = intel_freq_opcode(dev_priv, val); + val = intel_freq_opcode(rps, val); if (val < rps->min_freq || val > rps->max_freq) return -EINVAL; @@ -335,19 +335,19 @@ static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + struct intel_rps *rps = &dev_priv->gt.rps; return snprintf(buf, PAGE_SIZE, "%d\n", - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.efficient_freq)); + intel_gpu_freq(rps, rps->efficient_freq)); } static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + struct intel_rps *rps = &dev_priv->gt.rps; return snprintf(buf, PAGE_SIZE, "%d\n", - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.max_freq_softlimit)); + intel_gpu_freq(rps, rps->max_freq_softlimit)); } static ssize_t gt_max_freq_mhz_store(struct device *kdev, @@ -355,19 +355,17 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, const char *buf, size_t count) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &dev_priv->gt_pm.rps; - intel_wakeref_t wakeref; - u32 val; + struct intel_rps *rps = &dev_priv->gt.rps; ssize_t ret; + u32 val; ret = kstrtou32(buf, 0, &val); if (ret) return ret; - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&rps->lock); - val = intel_freq_opcode(dev_priv, val); + val = intel_freq_opcode(rps, val); if (val < rps->min_freq || val > rps->max_freq || val < 
rps->min_freq_softlimit) { @@ -377,7 +375,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, if (val > rps->rp0_freq) DRM_DEBUG("User requested overclocking to %d\n", - intel_gpu_freq(dev_priv, val)); + intel_gpu_freq(rps, val)); rps->max_freq_softlimit = val; @@ -385,14 +383,15 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, rps->min_freq_softlimit, rps->max_freq_softlimit); - /* We still need *_set_rps to process the new max_delay and + /* + * We still need *_set_rps to process the new max_delay and * update the interrupt limits and PMINTRMSK even though - * frequency request may be unchanged. */ - ret = intel_set_rps(dev_priv, val); + * frequency request may be unchanged. + */ + intel_rps_set(rps, val); unlock: mutex_unlock(&rps->lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret ?: count; } @@ -400,10 +399,10 @@ unlock: static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); + struct intel_rps *rps = &dev_priv->gt.rps; return snprintf(buf, PAGE_SIZE, "%d\n", - intel_gpu_freq(dev_priv, - dev_priv->gt_pm.rps.min_freq_softlimit)); + intel_gpu_freq(rps, rps->min_freq_softlimit)); } static ssize_t gt_min_freq_mhz_store(struct device *kdev, @@ -411,19 +410,17 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, const char *buf, size_t count) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &dev_priv->gt_pm.rps; - intel_wakeref_t wakeref; - u32 val; + struct intel_rps *rps = &dev_priv->gt.rps; ssize_t ret; + u32 val; ret = kstrtou32(buf, 0, &val); if (ret) return ret; - wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&rps->lock); - val = intel_freq_opcode(dev_priv, val); + val = intel_freq_opcode(rps, val); if (val < rps->min_freq || val > rps->max_freq || val > rps->max_freq_softlimit) { @@ -437,14 +434,15 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, rps->min_freq_softlimit, rps->max_freq_softlimit); - /* We still need *_set_rps to process the new min_delay and + /* + * We still need *_set_rps to process the new min_delay and * update the interrupt limits and PMINTRMSK even though - * frequency request may be unchanged. */ - ret = intel_set_rps(dev_priv, val); + * frequency request may be unchanged. 
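gt_max_freq_mhz_store() above keeps the usual sysfs store shape even after dropping the runtime-pm bracket: parse the MHz value, convert it into hardware units, reject anything outside the hard limits or below the minimum soft limit, then update the soft limit and hand the value back to the rps code under rps->lock. A compact userspace sketch of that validate-then-apply flow; the 50 MHz step in mhz_to_hw() is only a stand-in for intel_freq_opcode(), and store_max_freq_mhz()/struct rps_limits are illustrative:

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

struct rps_limits {
	pthread_mutex_t lock;
	unsigned int min_freq, max_freq;	/* hard limits, hardware units */
	unsigned int min_soft, max_soft;	/* user-tunable window */
	unsigned int cur;			/* current request */
};

static struct rps_limits rps = {
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.min_freq = 6, .max_freq = 24,
	.min_soft = 6, .max_soft = 24,
	.cur = 12,
};

/* Stand-in for intel_freq_opcode(): assume a 50 MHz step for illustration. */
static unsigned int mhz_to_hw(unsigned long mhz)
{
	return (unsigned int)(mhz / 50);
}

static int store_max_freq_mhz(const char *buf)
{
	char *end;
	unsigned long mhz = strtoul(buf, &end, 0);
	unsigned int val;
	int ret = 0;

	if (end == buf)
		return -EINVAL;

	val = mhz_to_hw(mhz);

	pthread_mutex_lock(&rps.lock);
	if (val < rps.min_freq || val > rps.max_freq || val < rps.min_soft) {
		ret = -EINVAL;
	} else {
		rps.max_soft = val;
		if (rps.cur > rps.max_soft)	/* re-clamp the current request */
			rps.cur = rps.max_soft;
	}
	pthread_mutex_unlock(&rps.lock);

	return ret;
}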
+ */ + intel_rps_set(rps, val); unlock: mutex_unlock(&rps->lock); - intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret ?: count; } @@ -466,15 +464,15 @@ static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) { struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev); - struct intel_rps *rps = &dev_priv->gt_pm.rps; + struct intel_rps *rps = &dev_priv->gt.rps; u32 val; if (attr == &dev_attr_gt_RP0_freq_mhz) - val = intel_gpu_freq(dev_priv, rps->rp0_freq); + val = intel_gpu_freq(rps, rps->rp0_freq); else if (attr == &dev_attr_gt_RP1_freq_mhz) - val = intel_gpu_freq(dev_priv, rps->rp1_freq); + val = intel_gpu_freq(rps, rps->rp1_freq); else if (attr == &dev_attr_gt_RPn_freq_mhz) - val = intel_gpu_freq(dev_priv, rps->min_freq); + val = intel_gpu_freq(rps, rps->min_freq); else BUG(); diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h index 24f2944da09d..7ef7a1e1664c 100644 --- a/drivers/gpu/drm/i915/i915_trace.h +++ b/drivers/gpu/drm/i915/i915_trace.h @@ -665,7 +665,6 @@ TRACE_EVENT(i915_request_queue, TP_STRUCT__entry( __field(u32, dev) - __field(u32, hw_id) __field(u64, ctx) __field(u16, class) __field(u16, instance) @@ -675,7 +674,6 @@ TRACE_EVENT(i915_request_queue, TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->gem_context->hw_id; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; @@ -683,10 +681,9 @@ TRACE_EVENT(i915_request_queue, __entry->flags = flags; ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x", + TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x", __entry->dev, __entry->class, __entry->instance, - __entry->hw_id, __entry->ctx, __entry->seqno, - __entry->flags) + __entry->ctx, __entry->seqno, __entry->flags) ); DECLARE_EVENT_CLASS(i915_request, @@ -695,7 +692,6 @@ DECLARE_EVENT_CLASS(i915_request, TP_STRUCT__entry( __field(u32, dev) - __field(u32, hw_id) __field(u64, ctx) __field(u16, class) __field(u16, instance) @@ -704,16 +700,15 @@ DECLARE_EVENT_CLASS(i915_request, TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->gem_context->hw_id; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; __entry->seqno = rq->fence.seqno; ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u", + TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u", __entry->dev, __entry->class, __entry->instance, - __entry->hw_id, __entry->ctx, __entry->seqno) + __entry->ctx, __entry->seqno) ); DEFINE_EVENT(i915_request, i915_request_add, @@ -738,7 +733,6 @@ TRACE_EVENT(i915_request_in, TP_STRUCT__entry( __field(u32, dev) - __field(u32, hw_id) __field(u64, ctx) __field(u16, class) __field(u16, instance) @@ -749,7 +743,6 @@ TRACE_EVENT(i915_request_in, TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->gem_context->hw_id; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; @@ -758,9 +751,9 @@ TRACE_EVENT(i915_request_in, __entry->port = port; ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, prio=%u, port=%u", + TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%u, port=%u", __entry->dev, __entry->class, __entry->instance, - __entry->hw_id, __entry->ctx, __entry->seqno, + 
__entry->ctx, __entry->seqno, __entry->prio, __entry->port) ); @@ -770,7 +763,6 @@ TRACE_EVENT(i915_request_out, TP_STRUCT__entry( __field(u32, dev) - __field(u32, hw_id) __field(u64, ctx) __field(u16, class) __field(u16, instance) @@ -780,7 +772,6 @@ TRACE_EVENT(i915_request_out, TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->gem_context->hw_id; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; @@ -788,10 +779,9 @@ TRACE_EVENT(i915_request_out, __entry->completed = i915_request_completed(rq); ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, completed?=%u", + TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, completed?=%u", __entry->dev, __entry->class, __entry->instance, - __entry->hw_id, __entry->ctx, __entry->seqno, - __entry->completed) + __entry->ctx, __entry->seqno, __entry->completed) ); #else @@ -829,7 +819,6 @@ TRACE_EVENT(i915_request_wait_begin, TP_STRUCT__entry( __field(u32, dev) - __field(u32, hw_id) __field(u64, ctx) __field(u16, class) __field(u16, instance) @@ -845,7 +834,6 @@ TRACE_EVENT(i915_request_wait_begin, */ TP_fast_assign( __entry->dev = rq->i915->drm.primary->index; - __entry->hw_id = rq->gem_context->hw_id; __entry->class = rq->engine->uabi_class; __entry->instance = rq->engine->uabi_instance; __entry->ctx = rq->fence.context; @@ -853,9 +841,9 @@ TRACE_EVENT(i915_request_wait_begin, __entry->flags = flags; ), - TP_printk("dev=%u, engine=%u:%u, hw_id=%u, ctx=%llu, seqno=%u, flags=0x%x", + TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, flags=0x%x", __entry->dev, __entry->class, __entry->instance, - __entry->hw_id, __entry->ctx, __entry->seqno, + __entry->ctx, __entry->seqno, __entry->flags) ); @@ -958,19 +946,17 @@ DECLARE_EVENT_CLASS(i915_context, TP_STRUCT__entry( __field(u32, dev) __field(struct i915_gem_context *, ctx) - __field(u32, hw_id) __field(struct i915_address_space *, vm) ), TP_fast_assign( __entry->dev = ctx->i915->drm.primary->index; __entry->ctx = ctx; - __entry->hw_id = ctx->hw_id; - __entry->vm = ctx->vm; + __entry->vm = rcu_access_pointer(ctx->vm); ), - TP_printk("dev=%u, ctx=%p, ctx_vm=%p, hw_id=%u", - __entry->dev, __entry->ctx, __entry->vm, __entry->hw_id) + TP_printk("dev=%u, ctx=%p, ctx_vm=%p", + __entry->dev, __entry->ctx, __entry->vm) ) DEFINE_EVENT(i915_context, i915_context_create, diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c index 16acdf7bdbe6..0348c6d0ef5f 100644 --- a/drivers/gpu/drm/i915/i915_utils.c +++ b/drivers/gpu/drm/i915/i915_utils.c @@ -54,25 +54,54 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) static unsigned int i915_probe_fail_count; -int __i915_inject_load_error(struct drm_i915_private *i915, int err, - const char *func, int line) +int __i915_inject_probe_error(struct drm_i915_private *i915, int err, + const char *func, int line) { - if (i915_probe_fail_count >= i915_modparams.inject_load_failure) + if (i915_probe_fail_count >= i915_modparams.inject_probe_failure) return 0; - if (++i915_probe_fail_count < i915_modparams.inject_load_failure) + if (++i915_probe_fail_count < i915_modparams.inject_probe_failure) return 0; __i915_printk(i915, KERN_INFO, "Injecting failure %d at checkpoint %u [%s:%d]\n", - err, i915_modparams.inject_load_failure, func, line); - i915_modparams.inject_load_failure = 0; + err, i915_modparams.inject_probe_failure, func, line); + i915_modparams.inject_probe_failure = 
0; return err; } bool i915_error_injected(void) { - return i915_probe_fail_count && !i915_modparams.inject_load_failure; + return i915_probe_fail_count && !i915_modparams.inject_probe_failure; } #endif + +void cancel_timer(struct timer_list *t) +{ + if (!READ_ONCE(t->expires)) + return; + + del_timer(t); + WRITE_ONCE(t->expires, 0); +} + +void set_timer_ms(struct timer_list *t, unsigned long timeout) +{ + if (!timeout) { + cancel_timer(t); + return; + } + + timeout = msecs_to_jiffies_timeout(timeout); + + /* + * Paranoia to make sure the compiler computes the timeout before + * loading 'jiffies' as jiffies is volatile and may be updated in + * the background by a timer tick. All to reduce the complexity + * of the addition and reduce the risk of losing a jiffie. + */ + barrier(); + + mod_timer(t, jiffies + timeout); +} diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 562f756da421..04139ba1191e 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -32,6 +32,7 @@ #include <linux/workqueue.h> struct drm_i915_private; +struct timer_list; #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -60,20 +61,20 @@ __i915_printk(struct drm_i915_private *dev_priv, const char *level, #if IS_ENABLED(CONFIG_DRM_I915_DEBUG) -int __i915_inject_load_error(struct drm_i915_private *i915, int err, - const char *func, int line); -#define i915_inject_load_error(_i915, _err) \ - __i915_inject_load_error((_i915), (_err), __func__, __LINE__) +int __i915_inject_probe_error(struct drm_i915_private *i915, int err, + const char *func, int line); +#define i915_inject_probe_error(_i915, _err) \ + __i915_inject_probe_error((_i915), (_err), __func__, __LINE__) bool i915_error_injected(void); #else -#define i915_inject_load_error(_i915, _err) 0 +#define i915_inject_probe_error(_i915, _err) 0 #define i915_error_injected() false #endif -#define i915_inject_probe_failure(i915) i915_inject_load_error((i915), -ENODEV) +#define i915_inject_probe_failure(i915) i915_inject_probe_error((i915), -ENODEV) #define i915_probe_error(i915, fmt, ...) \ __i915_printk(i915, i915_error_injected() ? KERN_DEBUG : KERN_ERR, \ @@ -421,4 +422,25 @@ static inline void add_taint_for_CI(unsigned int taint) add_taint(taint, LOCKDEP_STILL_OK); } +void cancel_timer(struct timer_list *t); +void set_timer_ms(struct timer_list *t, unsigned long timeout); + +static inline bool timer_expired(const struct timer_list *t) +{ + return READ_ONCE(t->expires) && !timer_pending(t); +} + +/* + * This is a lookalike for IS_ENABLED() that takes a kconfig value, + * e.g. CONFIG_DRM_I915_SPIN_REQUEST, and evaluates whether it is non-zero + * i.e. whether the configuration is active. Wrapping up the config inside + * a boolean context prevents clang and smatch from complaining about potential + * issues in confusing logical-&& with bitwise-& for constants. + * + * Sadly IS_ENABLED() itself does not work with kconfig values. + * + * Returns 0 if @config is 0, 1 if set to any value. 
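The cancel_timer()/set_timer_ms()/timer_expired() helpers added above turn a struct timer_list into a cheap deadline: a non-zero expires means armed, and once the timer fires timer_pending() goes false, so timer_expired() reads true until the deadline is cancelled or re-armed. A kernel-flavoured sketch of how such a deadline could be consumed; struct poller, deadline_fn() and poll_until() are illustrative and not part of this series:

#include <linux/timer.h>
#include <linux/types.h>

static void deadline_fn(struct timer_list *t)
{
	/* expiry is observed via timer_expired(); nothing to do here */
}

struct poller {
	struct timer_list deadline;
};

static void poller_init(struct poller *p)
{
	timer_setup(&p->deadline, deadline_fn, 0);
}

static bool poll_until(struct poller *p, bool (*done)(void *), void *arg,
		       unsigned long ms)
{
	set_timer_ms(&p->deadline, ms);	/* note: ms == 0 cancels instead of arming */

	do {
		if (done(arg)) {
			cancel_timer(&p->deadline);
			return true;
		}
		cpu_relax();
	} while (!timer_expired(&p->deadline));

	return false;
}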
+ */ +#define IS_ACTIVE(config) ((config) != 0) + #endif /* !__I915_UTILS_H */ diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index e0e677b2a3a9..e5512f26e20a 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -32,6 +32,7 @@ #include "i915_drv.h" #include "i915_globals.h" +#include "i915_sw_fence_work.h" #include "i915_trace.h" #include "i915_vma.h" @@ -90,6 +91,7 @@ static int __i915_vma_active(struct i915_active *ref) return i915_vma_tryget(active_to_vma(ref)) ? 0 : -ENOENT; } +__i915_active_call static void __i915_vma_retire(struct i915_active *ref) { i915_vma_put(active_to_vma(ref)); @@ -104,21 +106,21 @@ vma_create(struct drm_i915_gem_object *obj, struct rb_node *rb, **p; /* The aliasing_ppgtt should never be used directly! */ - GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm); + GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm); vma = i915_vma_alloc(); if (vma == NULL) return ERR_PTR(-ENOMEM); - vma->vm = vm; + mutex_init(&vma->pages_mutex); + vma->vm = i915_vm_get(vm); vma->ops = &vm->vma_ops; vma->obj = obj; vma->resv = obj->base.resv; vma->size = obj->base.size; vma->display_alignment = I915_GTT_MIN_ALIGNMENT; - i915_active_init(vm->i915, &vma->active, - __i915_vma_active, __i915_vma_retire); + i915_active_init(&vma->active, __i915_vma_active, __i915_vma_retire); /* Declare ourselves safe for use inside shrinkers */ if (IS_ENABLED(CONFIG_LOCKDEP)) { @@ -171,7 +173,7 @@ vma_create(struct drm_i915_gem_object *obj, i915_gem_object_get_stride(obj)); GEM_BUG_ON(!is_power_of_2(vma->fence_alignment)); - vma->flags |= I915_VMA_GGTT; + __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); } spin_lock(&obj->vma.lock); @@ -218,10 +220,6 @@ vma_create(struct drm_i915_gem_object *obj, spin_unlock(&obj->vma.lock); - mutex_lock(&vm->mutex); - list_add(&vma->vm_link, &vm->unbound_list); - mutex_unlock(&vm->mutex); - return vma; err_vma: @@ -265,8 +263,6 @@ vma_lookup(struct drm_i915_gem_object *obj, * Once created, the VMA is kept until either the object is freed, or the * address space is closed. * - * Must be called with struct_mutex held. - * * Returns the vma, or an error pointer. */ struct i915_vma * @@ -277,7 +273,7 @@ i915_vma_instance(struct drm_i915_gem_object *obj, struct i915_vma *vma; GEM_BUG_ON(view && !i915_is_ggtt(vm)); - GEM_BUG_ON(vm->closed); + GEM_BUG_ON(!atomic_read(&vm->open)); spin_lock(&obj->vma.lock); vma = vma_lookup(obj, vm, view); @@ -291,18 +287,63 @@ i915_vma_instance(struct drm_i915_gem_object *obj, return vma; } +struct i915_vma_work { + struct dma_fence_work base; + struct i915_vma *vma; + enum i915_cache_level cache_level; + unsigned int flags; +}; + +static int __vma_bind(struct dma_fence_work *work) +{ + struct i915_vma_work *vw = container_of(work, typeof(*vw), base); + struct i915_vma *vma = vw->vma; + int err; + + err = vma->ops->bind_vma(vma, vw->cache_level, vw->flags); + if (err) + atomic_or(I915_VMA_ERROR, &vma->flags); + + if (vma->obj) + __i915_gem_object_unpin_pages(vma->obj); + + return err; +} + +static const struct dma_fence_work_ops bind_ops = { + .name = "bind", + .work = __vma_bind, +}; + +struct i915_vma_work *i915_vma_work(void) +{ + struct i915_vma_work *vw; + + vw = kzalloc(sizeof(*vw), GFP_KERNEL); + if (!vw) + return NULL; + + dma_fence_work_init(&vw->base, &bind_ops); + vw->base.dma.error = -EAGAIN; /* disable the worker by default */ + + return vw; +} + /** * i915_vma_bind - Sets up PTEs for an VMA in it's corresponding address space. 
* @vma: VMA to map * @cache_level: mapping cache level * @flags: flags like global or local mapping + * @work: preallocated worker for allocating and binding the PTE * * DMA addresses are taken from the scatter-gather table of this object (or of * this VMA in case of non-default GGTT views) and PTE entries set up. * Note that DMA addresses are also the only part of the SG table we care about. */ -int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, - u32 flags) +int i915_vma_bind(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags, + struct i915_vma_work *work) { u32 bind_flags; u32 vma_flags; @@ -319,13 +360,11 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, if (GEM_DEBUG_WARN_ON(!flags)) return -EINVAL; - bind_flags = 0; - if (flags & PIN_GLOBAL) - bind_flags |= I915_VMA_GLOBAL_BIND; - if (flags & PIN_USER) - bind_flags |= I915_VMA_LOCAL_BIND; + bind_flags = flags; + bind_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; - vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); + vma_flags = atomic_read(&vma->flags); + vma_flags &= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; if (flags & PIN_UPDATE) bind_flags |= vma_flags; else @@ -336,11 +375,34 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, GEM_BUG_ON(!vma->pages); trace_i915_vma_bind(vma, bind_flags); - ret = vma->ops->bind_vma(vma, cache_level, bind_flags); - if (ret) - return ret; + if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) { + work->vma = vma; + work->cache_level = cache_level; + work->flags = bind_flags | I915_VMA_ALLOC; - vma->flags |= bind_flags; + /* + * Note we only want to chain up to the migration fence on + * the pages (not the object itself). As we don't track that, + * yet, we have to use the exclusive fence instead. + * + * Also note that we do not want to track the async vma as + * part of the obj->resv->excl_fence as it only affects + * execution and not content or object's backing store lifetime. + */ + GEM_BUG_ON(i915_active_has_exclusive(&vma->active)); + i915_active_set_exclusive(&vma->active, &work->base.dma); + work->base.dma.error = 0; /* enable the queue_work() */ + + if (vma->obj) + __i915_gem_object_pin_pages(vma->obj); + } else { + GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags); + ret = vma->ops->bind_vma(vma, cache_level, bind_flags); + if (ret) + return ret; + } + + atomic_or(bind_flags, &vma->flags); return 0; } @@ -350,18 +412,16 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) int err; /* Access through the GTT requires the device to be awake. 
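i915_vma_work() above preallocates the asynchronous bind worker with base.dma.error set to -EAGAIN, i.e. disabled; i915_vma_bind() only clears the error once it has actually queued an asynchronous bind, and i915_vma_pin() further down commits the worker on its way out regardless, so the unused case is simply torn down. That "allocate disabled, enable under the lock, always commit" lifecycle can be reduced to a plain userspace C sketch (struct deferred_work, work_alloc() and work_commit() are illustrative, not the dma_fence_work API):

#include <stdlib.h>

struct deferred_work {
	int error;			/* non-zero: disabled, will not run */
	void (*fn)(struct deferred_work *w);
};

static struct deferred_work *work_alloc(void (*fn)(struct deferred_work *w))
{
	struct deferred_work *w = calloc(1, sizeof(*w));

	if (w) {
		w->fn = fn;
		w->error = -11;		/* start disabled, like -EAGAIN */
	}
	return w;
}

/* Called on every exit path: runs enabled work, quietly frees the rest. */
static void work_commit(struct deferred_work *w)
{
	if (!w)
		return;
	if (w->error == 0)
		w->fn(w);
	free(w);
}

The benefit is that error paths never need to know whether the worker was used; committing a still-disabled worker is just a free().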
*/ - assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm); - - lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); - if (WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { + assert_rpm_wakelock_held(vma->vm->gt->uncore->rpm); + if (GEM_WARN_ON(!i915_vma_is_map_and_fenceable(vma))) { err = -ENODEV; goto err; } GEM_BUG_ON(!i915_vma_is_ggtt(vma)); - GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0); + GEM_BUG_ON(!i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND)); - ptr = vma->iomap; + ptr = READ_ONCE(vma->iomap); if (ptr == NULL) { ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->iomap, vma->node.start, @@ -371,7 +431,10 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma) goto err; } - vma->iomap = ptr; + if (unlikely(cmpxchg(&vma->iomap, NULL, ptr))) { + io_mapping_unmap(ptr); + ptr = vma->iomap; + } } __i915_vma_pin(vma); @@ -391,18 +454,12 @@ err: void i915_vma_flush_writes(struct i915_vma *vma) { - if (!i915_vma_has_ggtt_write(vma)) - return; - - intel_gt_flush_ggtt_writes(vma->vm->gt); - - i915_vma_unset_ggtt_write(vma); + if (i915_vma_unset_ggtt_write(vma)) + intel_gt_flush_ggtt_writes(vma->vm->gt); } void i915_vma_unpin_iomap(struct i915_vma *vma) { - lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); - GEM_BUG_ON(vma->iomap == NULL); i915_vma_flush_writes(vma); @@ -438,6 +495,9 @@ bool i915_vma_misplaced(const struct i915_vma *vma, if (!drm_mm_node_allocated(&vma->node)) return false; + if (test_bit(I915_VMA_ERROR_BIT, __i915_vma_flags(vma))) + return true; + if (vma->node.size < size) return true; @@ -472,17 +532,12 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma) mappable = vma->node.start + vma->fence_size <= i915_vm_to_ggtt(vma->vm)->mappable_end; if (mappable && fenceable) - vma->flags |= I915_VMA_CAN_FENCE; + set_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); else - vma->flags &= ~I915_VMA_CAN_FENCE; -} - -static bool color_differs(struct drm_mm_node *node, unsigned long color) -{ - return node->allocated && node->color != color; + clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); } -bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) +bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color) { struct drm_mm_node *node = &vma->node; struct drm_mm_node *other; @@ -494,7 +549,7 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) * these constraints apply and set the drm_mm.color_adjust * appropriately. 
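The colour checks that continue just below (now keyed on i915_vm_has_cache_coloring() and i915_node_color_differs()) implement one rule: a node may only sit directly against a neighbour of a different colour if a hole separates the two. The rule in isolation, as a standalone C sketch over a doubly linked node list (struct node and valid_placement() are illustrative types, not drm_mm's):

#include <stdbool.h>
#include <stddef.h>

struct node {
	unsigned long color;
	bool hole_follows;	/* free space between this node and the next */
	struct node *prev, *next;
};

static bool color_differs(const struct node *n, unsigned long color)
{
	return n && n->color != color;
}

static bool valid_placement(const struct node *n, unsigned long color)
{
	/* a previous neighbour of another colour must be followed by a hole */
	if (color_differs(n->prev, color) && !n->prev->hole_follows)
		return false;
	/* a hole must follow this node if the next neighbour differs */
	if (color_differs(n->next, color) && !n->hole_follows)
		return false;
	return true;
}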
*/ - if (vma->vm->mm.color_adjust == NULL) + if (!i915_vm_has_cache_coloring(vma->vm)) return true; /* Only valid to be called on an already inserted vma */ @@ -502,11 +557,13 @@ bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level) GEM_BUG_ON(list_empty(&node->node_list)); other = list_prev_entry(node, node_list); - if (color_differs(other, cache_level) && !drm_mm_hole_follows(other)) + if (i915_node_color_differs(other, color) && + !drm_mm_hole_follows(other)) return false; other = list_next_entry(node, node_list); - if (color_differs(other, cache_level) && !drm_mm_hole_follows(node)) + if (i915_node_color_differs(other, color) && + !drm_mm_hole_follows(node)) return false; return true; @@ -541,13 +598,12 @@ static void assert_bind_count(const struct drm_i915_gem_object *obj) static int i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) { - struct drm_i915_private *dev_priv = vma->vm->i915; - unsigned int cache_level; + unsigned long color; u64 start, end; int ret; GEM_BUG_ON(i915_vma_is_closed(vma)); - GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); + GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); size = max(size, vma->size); @@ -567,7 +623,7 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) end = vma->vm->total; if (flags & PIN_MAPPABLE) - end = min_t(u64, end, dev_priv->ggtt.mappable_end); + end = min_t(u64, end, i915_vm_to_ggtt(vma->vm)->mappable_end); if (flags & PIN_ZONE_4G) end = min_t(u64, end, (1ULL << 32) - I915_GTT_PAGE_SIZE); GEM_BUG_ON(!IS_ALIGNED(end, I915_GTT_PAGE_SIZE)); @@ -583,35 +639,21 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) return -ENOSPC; } - if (vma->obj) { - ret = i915_gem_object_pin_pages(vma->obj); - if (ret) - return ret; - - cache_level = vma->obj->cache_level; - } else { - cache_level = 0; - } - - GEM_BUG_ON(vma->pages); - - ret = vma->ops->set_pages(vma); - if (ret) - goto err_unpin; + color = 0; + if (vma->obj && i915_vm_has_cache_coloring(vma->vm)) + color = vma->obj->cache_level; if (flags & PIN_OFFSET_FIXED) { u64 offset = flags & PIN_OFFSET_MASK; if (!IS_ALIGNED(offset, alignment) || - range_overflows(offset, size, end)) { - ret = -EINVAL; - goto err_clear; - } + range_overflows(offset, size, end)) + return -EINVAL; ret = i915_gem_gtt_reserve(vma->vm, &vma->node, - size, offset, cache_level, + size, offset, color, flags); if (ret) - goto err_clear; + return ret; } else { /* * We only support huge gtt pages through the 48b PPGTT, @@ -647,116 +689,259 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) } ret = i915_gem_gtt_insert(vma->vm, &vma->node, - size, alignment, cache_level, + size, alignment, color, start, end, flags); if (ret) - goto err_clear; + return ret; GEM_BUG_ON(vma->node.start < start); GEM_BUG_ON(vma->node.start + vma->node.size > end); } GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, cache_level)); - - mutex_lock(&vma->vm->mutex); - list_move_tail(&vma->vm_link, &vma->vm->bound_list); - mutex_unlock(&vma->vm->mutex); + GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, color)); if (vma->obj) { - atomic_inc(&vma->obj->bind_count); - assert_bind_count(vma->obj); + struct drm_i915_gem_object *obj = vma->obj; + + atomic_inc(&obj->bind_count); + assert_bind_count(obj); } + list_add_tail(&vma->vm_link, &vma->vm->bound_list); return 0; - -err_clear: - 
vma->ops->clear_pages(vma); -err_unpin: - if (vma->obj) - i915_gem_object_unpin_pages(vma->obj); - return ret; } static void -i915_vma_remove(struct i915_vma *vma) +i915_vma_detach(struct i915_vma *vma) { GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); - - vma->ops->clear_pages(vma); - - mutex_lock(&vma->vm->mutex); - drm_mm_remove_node(&vma->node); - list_move_tail(&vma->vm_link, &vma->vm->unbound_list); - mutex_unlock(&vma->vm->mutex); + GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)); /* - * Since the unbound list is global, only move to that list if - * no more VMAs exist. + * And finally now the object is completely decoupled from this + * vma, we can drop its hold on the backing storage and allow + * it to be reaped by the shrinker. */ + list_del(&vma->vm_link); if (vma->obj) { struct drm_i915_gem_object *obj = vma->obj; + assert_bind_count(obj); atomic_dec(&obj->bind_count); + } +} - /* - * And finally now the object is completely decoupled from this - * vma, we can drop its hold on the backing storage and allow - * it to be reaped by the shrinker. - */ - i915_gem_object_unpin_pages(obj); - assert_bind_count(obj); +static bool try_qad_pin(struct i915_vma *vma, unsigned int flags) +{ + unsigned int bound; + bool pinned = true; + + bound = atomic_read(&vma->flags); + do { + if (unlikely(flags & ~bound)) + return false; + + if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) + return false; + + if (!(bound & I915_VMA_PIN_MASK)) + goto unpinned; + + GEM_BUG_ON(((bound + 1) & I915_VMA_PIN_MASK) == 0); + } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); + + return true; + +unpinned: + /* + * If pin_count==0, but we are bound, check under the lock to avoid + * racing with a concurrent i915_vma_unbind(). + */ + mutex_lock(&vma->vm->mutex); + do { + if (unlikely(bound & (I915_VMA_OVERFLOW | I915_VMA_ERROR))) { + pinned = false; + break; + } + + if (unlikely(flags & ~bound)) { + pinned = false; + break; + } + } while (!atomic_try_cmpxchg(&vma->flags, &bound, bound + 1)); + mutex_unlock(&vma->vm->mutex); + + return pinned; +} + +static int vma_get_pages(struct i915_vma *vma) +{ + int err = 0; + + if (atomic_add_unless(&vma->pages_count, 1, 0)) + return 0; + + /* Allocations ahoy! 
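try_qad_pin() above is the new lockless fast path for pinning: it snapshots the packed flags word and uses atomic_try_cmpxchg() to bump the pin count, but only while the requested binding is already present, no error bit is set and the count is non-zero; every other case falls back to vm->mutex. The same optimistic-acquire loop as a self-contained userspace sketch with C11 atomics (the bit layout and try_quick_pin() are illustrative):

#include <stdatomic.h>
#include <stdbool.h>

#define PIN_MASK	0x3ffu		/* low bits: pin count */
#define ERROR_FLAG	(1u << 13)	/* binding failed, force the slow path */

/* Take a pin only if the state described by 'wanted' is already in place. */
static bool try_quick_pin(atomic_uint *flags, unsigned int wanted)
{
	unsigned int old = atomic_load(flags);

	do {
		if (wanted & ~old)		/* required binding not present */
			return false;
		if (old & ERROR_FLAG)
			return false;
		if ((old & PIN_MASK) == 0)	/* unpinned: racy, use the mutex */
			return false;
		if (((old + 1) & PIN_MASK) == 0)
			return false;		/* counter would overflow */
	} while (!atomic_compare_exchange_weak(flags, &old, old + 1));

	return true;
}

On failure atomic_compare_exchange_weak() refreshes 'old' with the current value, so every retry re-applies all the checks against fresh state, matching the semantics of the kernel's atomic_try_cmpxchg() loop.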
*/ + if (mutex_lock_interruptible(&vma->pages_mutex)) + return -EINTR; + + if (!atomic_read(&vma->pages_count)) { + if (vma->obj) { + err = i915_gem_object_pin_pages(vma->obj); + if (err) + goto unlock; + } + + err = vma->ops->set_pages(vma); + if (err) { + if (vma->obj) + i915_gem_object_unpin_pages(vma->obj); + goto unlock; + } } + atomic_inc(&vma->pages_count); + +unlock: + mutex_unlock(&vma->pages_mutex); + + return err; } -int __i915_vma_do_pin(struct i915_vma *vma, - u64 size, u64 alignment, u64 flags) +static void __vma_put_pages(struct i915_vma *vma, unsigned int count) { - const unsigned int bound = vma->flags; - int ret; + /* We allocate under vma_get_pages, so beware the shrinker */ + mutex_lock_nested(&vma->pages_mutex, SINGLE_DEPTH_NESTING); + GEM_BUG_ON(atomic_read(&vma->pages_count) < count); + if (atomic_sub_return(count, &vma->pages_count) == 0) { + vma->ops->clear_pages(vma); + GEM_BUG_ON(vma->pages); + if (vma->obj) + i915_gem_object_unpin_pages(vma->obj); + } + mutex_unlock(&vma->pages_mutex); +} - lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); - GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0); - GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma)); +static void vma_put_pages(struct i915_vma *vma) +{ + if (atomic_add_unless(&vma->pages_count, -1, 1)) + return; - if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) { - ret = -EBUSY; - goto err_unpin; + __vma_put_pages(vma, 1); +} + +static void vma_unbind_pages(struct i915_vma *vma) +{ + unsigned int count; + + lockdep_assert_held(&vma->vm->mutex); + + /* The upper portion of pages_count is the number of bindings */ + count = atomic_read(&vma->pages_count); + count >>= I915_VMA_PAGES_BIAS; + GEM_BUG_ON(!count); + + __vma_put_pages(vma, count | count << I915_VMA_PAGES_BIAS); +} + +int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) +{ + struct i915_vma_work *work = NULL; + unsigned int bound; + int err; + + BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND); + BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND); + + GEM_BUG_ON(flags & PIN_UPDATE); + GEM_BUG_ON(!(flags & (PIN_USER | PIN_GLOBAL))); + + /* First try and grab the pin without rebinding the vma */ + if (try_qad_pin(vma, flags & I915_VMA_BIND_MASK)) + return 0; + + err = vma_get_pages(vma); + if (err) + return err; + + if (flags & vma->vm->bind_async_flags) { + work = i915_vma_work(); + if (!work) { + err = -ENOMEM; + goto err_pages; + } } - if ((bound & I915_VMA_BIND_MASK) == 0) { - ret = i915_vma_insert(vma, size, alignment, flags); - if (ret) - goto err_unpin; + /* No more allocations allowed once we hold vm->mutex */ + err = mutex_lock_interruptible(&vma->vm->mutex); + if (err) + goto err_fence; + + bound = atomic_read(&vma->flags); + if (unlikely(bound & I915_VMA_ERROR)) { + err = -ENOMEM; + goto err_unlock; } - GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - ret = i915_vma_bind(vma, vma->obj ? 
vma->obj->cache_level : 0, flags); - if (ret) - goto err_remove; + if (unlikely(!((bound + 1) & I915_VMA_PIN_MASK))) { + err = -EAGAIN; /* pins are meant to be fairly temporary */ + goto err_unlock; + } - GEM_BUG_ON((vma->flags & I915_VMA_BIND_MASK) == 0); + if (unlikely(!(flags & ~bound & I915_VMA_BIND_MASK))) { + __i915_vma_pin(vma); + goto err_unlock; + } - if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND) - __i915_vma_set_map_and_fenceable(vma); + err = i915_active_acquire(&vma->active); + if (err) + goto err_unlock; + + if (!(bound & I915_VMA_BIND_MASK)) { + err = i915_vma_insert(vma, size, alignment, flags); + if (err) + goto err_active; + + if (i915_is_ggtt(vma->vm)) + __i915_vma_set_map_and_fenceable(vma); + } + + GEM_BUG_ON(!vma->pages); + err = i915_vma_bind(vma, + vma->obj ? vma->obj->cache_level : 0, + flags, work); + if (err) + goto err_remove; + + /* There should only be at most 2 active bindings (user, global) */ + GEM_BUG_ON(bound + I915_VMA_PAGES_ACTIVE < bound); + atomic_add(I915_VMA_PAGES_ACTIVE, &vma->pages_count); + list_move_tail(&vma->vm_link, &vma->vm->bound_list); + __i915_vma_pin(vma); + GEM_BUG_ON(!i915_vma_is_pinned(vma)); + GEM_BUG_ON(!i915_vma_is_bound(vma, flags)); GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); - return 0; err_remove: - if ((bound & I915_VMA_BIND_MASK) == 0) { - i915_vma_remove(vma); - GEM_BUG_ON(vma->pages); - GEM_BUG_ON(vma->flags & I915_VMA_BIND_MASK); + if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) { + i915_vma_detach(vma); + drm_mm_remove_node(&vma->node); } -err_unpin: - __i915_vma_unpin(vma); - return ret; +err_active: + i915_active_release(&vma->active); +err_unlock: + mutex_unlock(&vma->vm->mutex); +err_fence: + if (work) + dma_fence_work_commit(&work->base); +err_pages: + vma_put_pages(vma); + return err; } void i915_vma_close(struct i915_vma *vma) { - struct drm_i915_private *i915 = vma->vm->i915; + struct intel_gt *gt = vma->vm->gt; unsigned long flags; GEM_BUG_ON(i915_vma_is_closed(vma)); @@ -773,79 +958,87 @@ void i915_vma_close(struct i915_vma *vma) * causing us to rebind the VMA once more. This ends up being a lot * of wasted work for the steady state. 
*/ - spin_lock_irqsave(&i915->gt.closed_lock, flags); - list_add(&vma->closed_link, &i915->gt.closed_vma); - spin_unlock_irqrestore(&i915->gt.closed_lock, flags); + spin_lock_irqsave(>->closed_lock, flags); + list_add(&vma->closed_link, >->closed_vma); + spin_unlock_irqrestore(>->closed_lock, flags); } static void __i915_vma_remove_closed(struct i915_vma *vma) { - struct drm_i915_private *i915 = vma->vm->i915; - - if (!i915_vma_is_closed(vma)) - return; + struct intel_gt *gt = vma->vm->gt; - spin_lock_irq(&i915->gt.closed_lock); + spin_lock_irq(>->closed_lock); list_del_init(&vma->closed_link); - spin_unlock_irq(&i915->gt.closed_lock); + spin_unlock_irq(>->closed_lock); } void i915_vma_reopen(struct i915_vma *vma) { - __i915_vma_remove_closed(vma); + if (i915_vma_is_closed(vma)) + __i915_vma_remove_closed(vma); } -static void __i915_vma_destroy(struct i915_vma *vma) +void i915_vma_destroy(struct i915_vma *vma) { - GEM_BUG_ON(vma->node.allocated); - GEM_BUG_ON(vma->fence); - - mutex_lock(&vma->vm->mutex); - list_del(&vma->vm_link); - mutex_unlock(&vma->vm->mutex); + if (drm_mm_node_allocated(&vma->node)) { + mutex_lock(&vma->vm->mutex); + atomic_and(~I915_VMA_PIN_MASK, &vma->flags); + WARN_ON(__i915_vma_unbind(vma)); + mutex_unlock(&vma->vm->mutex); + GEM_BUG_ON(drm_mm_node_allocated(&vma->node)); + } + GEM_BUG_ON(i915_vma_is_active(vma)); if (vma->obj) { struct drm_i915_gem_object *obj = vma->obj; spin_lock(&obj->vma.lock); list_del(&vma->obj_link); - rb_erase(&vma->obj_node, &vma->obj->vma.tree); + rb_erase(&vma->obj_node, &obj->vma.tree); spin_unlock(&obj->vma.lock); } - i915_active_fini(&vma->active); + __i915_vma_remove_closed(vma); + i915_vm_put(vma->vm); + i915_active_fini(&vma->active); i915_vma_free(vma); } -void i915_vma_destroy(struct i915_vma *vma) +void i915_vma_parked(struct intel_gt *gt) { - lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); + struct i915_vma *vma, *next; - GEM_BUG_ON(i915_vma_is_pinned(vma)); + spin_lock_irq(>->closed_lock); + list_for_each_entry_safe(vma, next, >->closed_vma, closed_link) { + struct drm_i915_gem_object *obj = vma->obj; + struct i915_address_space *vm = vma->vm; - __i915_vma_remove_closed(vma); + /* XXX All to avoid keeping a reference on i915_vma itself */ - WARN_ON(i915_vma_unbind(vma)); - GEM_BUG_ON(i915_vma_is_active(vma)); + if (!kref_get_unless_zero(&obj->base.refcount)) + continue; - __i915_vma_destroy(vma); -} + if (!i915_vm_tryopen(vm)) { + i915_gem_object_put(obj); + obj = NULL; + } -void i915_vma_parked(struct drm_i915_private *i915) -{ - struct i915_vma *vma, *next; + spin_unlock_irq(>->closed_lock); - spin_lock_irq(&i915->gt.closed_lock); - list_for_each_entry_safe(vma, next, &i915->gt.closed_vma, closed_link) { - list_del_init(&vma->closed_link); - spin_unlock_irq(&i915->gt.closed_lock); + if (obj) { + i915_vma_destroy(vma); + i915_gem_object_put(obj); + } - i915_vma_destroy(vma); + i915_vm_close(vm); - spin_lock_irq(&i915->gt.closed_lock); + /* Restart after dropping lock */ + spin_lock_irq(>->closed_lock); + next = list_first_entry(>->closed_vma, + typeof(*next), closed_link); } - spin_unlock_irq(&i915->gt.closed_lock); + spin_unlock_irq(>->closed_lock); } static void __i915_vma_iounmap(struct i915_vma *vma) @@ -883,6 +1076,20 @@ void i915_vma_revoke_mmap(struct i915_vma *vma) list_del(&vma->obj->userfault_link); } +int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq) +{ + int err; + + GEM_BUG_ON(!i915_vma_is_pinned(vma)); + + /* Wait for the vma to be bound before we start! 
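i915_vma_parked() above shows the recurring pattern for reaping a spinlock-protected list without holding the lock across the heavy work: take temporary references under the lock (kref_get_unless_zero(), i915_vm_tryopen()), drop the lock, destroy, then re-take the lock and restart from the head because the list may have changed in the meantime. The core walk-drop-restart loop, stripped of the reference juggling, as a userspace sketch (struct item, reap_item() and reap_closed() are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct item {
	struct item *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *closed_list;	/* singly linked, protected by list_lock */

static void reap_item(struct item *it)
{
	free(it);			/* the "heavy" work, done without the lock */
}

static void reap_closed(void)
{
	struct item *it;

	pthread_mutex_lock(&list_lock);
	while ((it = closed_list) != NULL) {
		closed_list = it->next;	/* detach under the lock */

		pthread_mutex_unlock(&list_lock);
		reap_item(it);		/* may sleep or take other locks */
		pthread_mutex_lock(&list_lock);
		/* the loop re-reads the head, which may have changed meanwhile */
	}
	pthread_mutex_unlock(&list_lock);
}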
*/ + err = i915_request_await_active(rq, &vma->active); + if (err) + return err; + + return i915_active_add_request(&vma->active, rq); +} + int i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq, unsigned int flags) @@ -890,27 +1097,15 @@ int i915_vma_move_to_active(struct i915_vma *vma, struct drm_i915_gem_object *obj = vma->obj; int err; - assert_vma_held(vma); assert_object_held(obj); - GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - /* - * Add a reference if we're newly entering the active list. - * The order in which we add operations to the retirement queue is - * vital here: mark_active adds to the start of the callback list, - * such that subsequent callbacks are called first. Therefore we - * add the active reference first and queue for it to be dropped - * *last*. - */ - err = i915_active_ref(&vma->active, rq->timeline, rq); + err = __i915_vma_move_to_active(vma, rq); if (unlikely(err)) return err; if (flags & EXEC_OBJECT_WRITE) { if (intel_frontbuffer_invalidate(obj->frontbuffer, ORIGIN_CS)) - i915_active_ref(&obj->frontbuffer->write, - rq->timeline, - rq); + i915_active_add_request(&obj->frontbuffer->write, rq); dma_resv_add_excl_fence(vma->resv, &rq->fence); obj->write_domain = I915_GEM_DOMAIN_RENDER; @@ -930,44 +1125,31 @@ int i915_vma_move_to_active(struct i915_vma *vma, return 0; } -int i915_vma_unbind(struct i915_vma *vma) +int __i915_vma_unbind(struct i915_vma *vma) { int ret; - lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); + lockdep_assert_held(&vma->vm->mutex); /* * First wait upon any activity as retiring the request may * have side-effects such as unpinning or even unbinding this vma. + * + * XXX Actually waiting under the vm->mutex is a hinderance and + * should be pipelined wherever possible. In cases where that is + * unavoidable, we should lift the wait to before the mutex. */ - might_sleep(); - if (i915_vma_is_active(vma)) { - /* - * When a closed VMA is retired, it is unbound - eek. - * In order to prevent it from being recursively closed, - * take a pin on the vma so that the second unbind is - * aborted. - * - * Even more scary is that the retire callback may free - * the object (last active vma). To prevent the explosion - * we defer the actual object free to a worker that can - * only proceed once it acquires the struct_mutex (which - * we currently hold, therefore it cannot free this object - * before we are finished). 
- */ - __i915_vma_pin(vma); - ret = i915_active_wait(&vma->active); - __i915_vma_unpin(vma); - if (ret) - return ret; - } - GEM_BUG_ON(i915_vma_is_active(vma)); + ret = i915_vma_sync(vma); + if (ret) + return ret; + GEM_BUG_ON(i915_vma_is_active(vma)); if (i915_vma_is_pinned(vma)) { vma_print_allocator(vma, "is pinned"); return -EBUSY; } + GEM_BUG_ON(i915_vma_is_active(vma)); if (!drm_mm_node_allocated(&vma->node)) return 0; @@ -982,34 +1164,47 @@ int i915_vma_unbind(struct i915_vma *vma) GEM_BUG_ON(i915_vma_has_ggtt_write(vma)); /* release the fence reg _after_ flushing */ - mutex_lock(&vma->vm->mutex); ret = i915_vma_revoke_fence(vma); - mutex_unlock(&vma->vm->mutex); if (ret) return ret; /* Force a pagefault for domain tracking on next user access */ - mutex_lock(&vma->vm->mutex); i915_vma_revoke_mmap(vma); - mutex_unlock(&vma->vm->mutex); __i915_vma_iounmap(vma); - vma->flags &= ~I915_VMA_CAN_FENCE; + clear_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); } GEM_BUG_ON(vma->fence); GEM_BUG_ON(i915_vma_has_userfault(vma)); - if (likely(!vma->vm->closed)) { + if (likely(atomic_read(&vma->vm->open))) { trace_i915_vma_unbind(vma); vma->ops->unbind_vma(vma); } - vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND); + atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags); - i915_vma_remove(vma); + i915_vma_detach(vma); + vma_unbind_pages(vma); + drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */ return 0; } +int i915_vma_unbind(struct i915_vma *vma) +{ + struct i915_address_space *vm = vma->vm; + int err; + + err = mutex_lock_interruptible(&vm->mutex); + if (err) + return err; + + err = __i915_vma_unbind(vma); + mutex_unlock(&vm->mutex); + + return err; +} + struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma) { i915_gem_object_make_unshrinkable(vma->obj); diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h index 889fc7cb910a..465932813bc5 100644 --- a/drivers/gpu/drm/i915/i915_vma.h +++ b/drivers/gpu/drm/i915/i915_vma.h @@ -72,7 +72,7 @@ struct i915_vma { * that exist in the ctx->handle_vmas LUT for this vma. */ atomic_t open_count; - unsigned long flags; + atomic_t flags; /** * How many users have pinned this object in GTT space. * @@ -96,22 +96,41 @@ struct i915_vma { * exclusive cachelines of a single page, so a maximum of 64 possible * users. 
*/ -#define I915_VMA_PIN_MASK 0xff -#define I915_VMA_PIN_OVERFLOW BIT(8) +#define I915_VMA_PIN_MASK 0x3ff +#define I915_VMA_OVERFLOW 0x200 /** Flags and address space this VMA is bound to */ -#define I915_VMA_GLOBAL_BIND BIT(9) -#define I915_VMA_LOCAL_BIND BIT(10) -#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND | I915_VMA_PIN_OVERFLOW) +#define I915_VMA_GLOBAL_BIND_BIT 10 +#define I915_VMA_LOCAL_BIND_BIT 11 -#define I915_VMA_GGTT BIT(11) -#define I915_VMA_CAN_FENCE BIT(12) -#define I915_VMA_USERFAULT_BIT 13 -#define I915_VMA_USERFAULT BIT(I915_VMA_USERFAULT_BIT) -#define I915_VMA_GGTT_WRITE BIT(14) +#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT)) +#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT)) + +#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND) + +#define I915_VMA_ALLOC_BIT 12 +#define I915_VMA_ALLOC ((int)BIT(I915_VMA_ALLOC_BIT)) + +#define I915_VMA_ERROR_BIT 13 +#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT)) + +#define I915_VMA_GGTT_BIT 14 +#define I915_VMA_CAN_FENCE_BIT 15 +#define I915_VMA_USERFAULT_BIT 16 +#define I915_VMA_GGTT_WRITE_BIT 17 + +#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT)) +#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT)) +#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT)) +#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT)) struct i915_active active; +#define I915_VMA_PAGES_BIAS 24 +#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1) + atomic_t pages_count; /* number of active binds to the pages */ + struct mutex pages_mutex; /* protect acquire/release of backing pages */ + /** * Support different GGTT views into the same object. * This means there can be multiple VMA mappings per object and per VM. @@ -158,52 +177,57 @@ static inline bool i915_vma_is_active(const struct i915_vma *vma) return !i915_active_is_idle(&vma->active); } +int __must_check __i915_vma_move_to_active(struct i915_vma *vma, + struct i915_request *rq); int __must_check i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq, unsigned int flags); +#define __i915_vma_flags(v) ((unsigned long *)&(v)->flags.counter) + static inline bool i915_vma_is_ggtt(const struct i915_vma *vma) { - return vma->flags & I915_VMA_GGTT; + return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(vma)); } static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma) { - return vma->flags & I915_VMA_GGTT_WRITE; + return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma)); } static inline void i915_vma_set_ggtt_write(struct i915_vma *vma) { GEM_BUG_ON(!i915_vma_is_ggtt(vma)); - vma->flags |= I915_VMA_GGTT_WRITE; + set_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags(vma)); } -static inline void i915_vma_unset_ggtt_write(struct i915_vma *vma) +static inline bool i915_vma_unset_ggtt_write(struct i915_vma *vma) { - vma->flags &= ~I915_VMA_GGTT_WRITE; + return test_and_clear_bit(I915_VMA_GGTT_WRITE_BIT, + __i915_vma_flags(vma)); } void i915_vma_flush_writes(struct i915_vma *vma); static inline bool i915_vma_is_map_and_fenceable(const struct i915_vma *vma) { - return vma->flags & I915_VMA_CAN_FENCE; + return test_bit(I915_VMA_CAN_FENCE_BIT, __i915_vma_flags(vma)); } static inline bool i915_vma_set_userfault(struct i915_vma *vma) { GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma)); - return __test_and_set_bit(I915_VMA_USERFAULT_BIT, &vma->flags); + return test_and_set_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma)); } static inline void i915_vma_unset_userfault(struct i915_vma *vma) { - 
return __clear_bit(I915_VMA_USERFAULT_BIT, &vma->flags); + return clear_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma)); } static inline bool i915_vma_has_userfault(const struct i915_vma *vma) { - return test_bit(I915_VMA_USERFAULT_BIT, &vma->flags); + return test_bit(I915_VMA_USERFAULT_BIT, __i915_vma_flags(vma)); } static inline bool i915_vma_is_closed(const struct i915_vma *vma) @@ -214,7 +238,7 @@ static inline bool i915_vma_is_closed(const struct i915_vma *vma) static inline u32 i915_ggtt_offset(const struct i915_vma *vma) { GEM_BUG_ON(!i915_vma_is_ggtt(vma)); - GEM_BUG_ON(!vma->node.allocated); + GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); GEM_BUG_ON(upper_32_bits(vma->node.start)); GEM_BUG_ON(upper_32_bits(vma->node.start + vma->node.size - 1)); return lower_32_bits(vma->node.start); @@ -293,13 +317,18 @@ i915_vma_compare(struct i915_vma *vma, return memcmp(&vma->ggtt_view.partial, &view->partial, view->type); } -int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, - u32 flags); -bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long cache_level); +struct i915_vma_work *i915_vma_work(void); +int i915_vma_bind(struct i915_vma *vma, + enum i915_cache_level cache_level, + u32 flags, + struct i915_vma_work *work); + +bool i915_gem_valid_gtt_space(struct i915_vma *vma, unsigned long color); bool i915_vma_misplaced(const struct i915_vma *vma, u64 size, u64 alignment, u64 flags); void __i915_vma_set_map_and_fenceable(struct i915_vma *vma); void i915_vma_revoke_mmap(struct i915_vma *vma); +int __i915_vma_unbind(struct i915_vma *vma); int __must_check i915_vma_unbind(struct i915_vma *vma); void i915_vma_unlink_ctx(struct i915_vma *vma); void i915_vma_close(struct i915_vma *vma); @@ -318,30 +347,12 @@ static inline void i915_vma_unlock(struct i915_vma *vma) dma_resv_unlock(vma->resv); } -int __i915_vma_do_pin(struct i915_vma *vma, - u64 size, u64 alignment, u64 flags); -static inline int __must_check -i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags) -{ - BUILD_BUG_ON(PIN_MBZ != I915_VMA_PIN_OVERFLOW); - BUILD_BUG_ON(PIN_GLOBAL != I915_VMA_GLOBAL_BIND); - BUILD_BUG_ON(PIN_USER != I915_VMA_LOCAL_BIND); - - /* Pin early to prevent the shrinker/eviction logic from destroying - * our vma as we insert and bind. 
- */ - if (likely(((++vma->flags ^ flags) & I915_VMA_BIND_MASK) == 0)) { - GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); - GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags)); - return 0; - } - - return __i915_vma_do_pin(vma, size, alignment, flags); -} +int __must_check +i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags); static inline int i915_vma_pin_count(const struct i915_vma *vma) { - return vma->flags & I915_VMA_PIN_MASK; + return atomic_read(&vma->flags) & I915_VMA_PIN_MASK; } static inline bool i915_vma_is_pinned(const struct i915_vma *vma) @@ -351,18 +362,18 @@ static inline bool i915_vma_is_pinned(const struct i915_vma *vma) static inline void __i915_vma_pin(struct i915_vma *vma) { - vma->flags++; - GEM_BUG_ON(vma->flags & I915_VMA_PIN_OVERFLOW); + atomic_inc(&vma->flags); + GEM_BUG_ON(!i915_vma_is_pinned(vma)); } static inline void __i915_vma_unpin(struct i915_vma *vma) { - vma->flags--; + GEM_BUG_ON(!i915_vma_is_pinned(vma)); + atomic_dec(&vma->flags); } static inline void i915_vma_unpin(struct i915_vma *vma) { - GEM_BUG_ON(!i915_vma_is_pinned(vma)); GEM_BUG_ON(!drm_mm_node_allocated(&vma->node)); __i915_vma_unpin(vma); } @@ -370,7 +381,13 @@ static inline void i915_vma_unpin(struct i915_vma *vma) static inline bool i915_vma_is_bound(const struct i915_vma *vma, unsigned int where) { - return vma->flags & where; + return atomic_read(&vma->flags) & where; +} + +static inline bool i915_node_color_differs(const struct drm_mm_node *node, + unsigned long color) +{ + return drm_mm_node_allocated(node) && node->color != color; } /** @@ -382,8 +399,6 @@ static inline bool i915_vma_is_bound(const struct i915_vma *vma, * the caller must call i915_vma_unpin_iomap to relinquish the pinning * after the iomapping is no longer required. * - * Callers must hold the struct_mutex. - * * Returns a valid iomapped pointer or ERR_PTR. */ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); @@ -395,8 +410,8 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma); * * Unpins the previously iomapped VMA from i915_vma_pin_iomap(). * - * Callers must hold the struct_mutex. This function is only valid to be - * called on a VMA previously iomapped by the caller with i915_vma_pin_iomap(). + * This function is only valid to be called on a VMA previously + * iomapped by the caller with i915_vma_pin_iomap(). 
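/*
 * Editor's illustrative sketch, not part of the patch: the reworked VMA
 * flags above pack the pin refcount into the low bits of one atomic word
 * (I915_VMA_PIN_MASK) and keep the bind/status flags in higher bits, so
 * pin/unpin and the bound checks no longer depend on struct_mutex.  A
 * minimal, standalone userspace analogue using C11 atomics; the names and
 * values below only mirror the idea and are assumptions, not kernel API.
 */
#include <stdatomic.h>
#include <stdio.h>

#define PIN_MASK    0x3ffu        /* low 10 bits: pin refcount */
#define GLOBAL_BIND (1u << 10)    /* example bind/status flag  */

static atomic_uint vma_flags;

int main(void)
{
    unsigned int v;

    atomic_fetch_or(&vma_flags, GLOBAL_BIND); /* "bind" the vma */
    atomic_fetch_add(&vma_flags, 1);          /* __vma_pin()    */

    v = atomic_load(&vma_flags);
    printf("pin_count=%u bound=%d\n", v & PIN_MASK, !!(v & GLOBAL_BIND));

    atomic_fetch_sub(&vma_flags, 1);          /* __vma_unpin()  */
    return 0;
}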
*/ void i915_vma_unpin_iomap(struct i915_vma *vma); @@ -424,6 +439,8 @@ static inline struct page *i915_vma_first_page(struct i915_vma *vma) int __must_check i915_vma_pin_fence(struct i915_vma *vma); int __must_check i915_vma_revoke_fence(struct i915_vma *vma); +int __i915_vma_pin_fence(struct i915_vma *vma); + static inline void __i915_vma_unpin_fence(struct i915_vma *vma) { GEM_BUG_ON(atomic_read(&vma->fence->pin_count) <= 0); @@ -441,12 +458,11 @@ static inline void __i915_vma_unpin_fence(struct i915_vma *vma) static inline void i915_vma_unpin_fence(struct i915_vma *vma) { - /* lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); */ if (vma->fence) __i915_vma_unpin_fence(vma); } -void i915_vma_parked(struct drm_i915_private *i915); +void i915_vma_parked(struct intel_gt *gt); #define for_each_until(cond) if (cond) break; else @@ -470,4 +486,10 @@ struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma); void i915_vma_make_shrinkable(struct i915_vma *vma); void i915_vma_make_purgeable(struct i915_vma *vma); +static inline int i915_vma_sync(struct i915_vma *vma) +{ + /* Wait for the asynchronous bindings and pending GPU reads */ + return i915_active_wait(&vma->active); +} + #endif diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c index 546577e39b4e..09870a31b4f0 100644 --- a/drivers/gpu/drm/i915/intel_csr.c +++ b/drivers/gpu/drm/i915/intel_csr.c @@ -44,8 +44,8 @@ #define TGL_CSR_MAX_FW_SIZE 0x6000 MODULE_FIRMWARE(TGL_CSR_PATH); -#define ICL_CSR_PATH "i915/icl_dmc_ver1_07.bin" -#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 7) +#define ICL_CSR_PATH "i915/icl_dmc_ver1_09.bin" +#define ICL_CSR_VERSION_REQUIRED CSR_VERSION(1, 9) #define ICL_CSR_MAX_FW_SIZE 0x6000 MODULE_FIRMWARE(ICL_CSR_PATH); diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index d0ed44d33484..a5b571364cf6 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -93,9 +93,9 @@ static void sseu_dump(const struct sseu_dev_info *sseu, struct drm_printer *p) hweight8(sseu->slice_mask), sseu->slice_mask); drm_printf(p, "subslice total: %u\n", intel_sseu_subslice_total(sseu)); for (s = 0; s < sseu->max_slices; s++) { - drm_printf(p, "slice%d: %u subslices, mask=%04x\n", + drm_printf(p, "slice%d: %u subslices, mask=%08x\n", s, intel_sseu_subslices_per_slice(sseu, s), - sseu->subslice_mask[s]); + intel_sseu_get_subslices(sseu, s)); } drm_printf(p, "EU total: %u\n", sseu->eu_total); drm_printf(p, "EU per subslice: %u\n", sseu->eu_per_subslice); @@ -118,10 +118,9 @@ void intel_device_info_dump_runtime(const struct intel_runtime_info *info, static int sseu_eu_idx(const struct sseu_dev_info *sseu, int slice, int subslice) { - int subslice_stride = GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); - int slice_stride = sseu->max_subslices * subslice_stride; + int slice_stride = sseu->max_subslices * sseu->eu_stride; - return slice * slice_stride + subslice * subslice_stride; + return slice * slice_stride + subslice * sseu->eu_stride; } static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, @@ -130,7 +129,7 @@ static u16 sseu_get_eus(const struct sseu_dev_info *sseu, int slice, int i, offset = sseu_eu_idx(sseu, slice, subslice); u16 eu_mask = 0; - for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { + for (i = 0; i < sseu->eu_stride; i++) { eu_mask |= ((u16)sseu->eu_mask[offset + i]) << (i * BITS_PER_BYTE); } @@ -143,7 +142,7 @@ static void sseu_set_eus(struct sseu_dev_info *sseu, 
int slice, int subslice, { int i, offset = sseu_eu_idx(sseu, slice, subslice); - for (i = 0; i < GEN_SSEU_STRIDE(sseu->max_eus_per_subslice); i++) { + for (i = 0; i < sseu->eu_stride; i++) { sseu->eu_mask[offset + i] = (eu_mask >> (BITS_PER_BYTE * i)) & 0xff; } @@ -160,9 +159,9 @@ void intel_device_info_dump_topology(const struct sseu_dev_info *sseu, } for (s = 0; s < sseu->max_slices; s++) { - drm_printf(p, "slice%d: %u subslice(s) (0x%hhx):\n", + drm_printf(p, "slice%d: %u subslice(s) (0x%08x):\n", s, intel_sseu_subslices_per_slice(sseu, s), - sseu->subslice_mask[s]); + intel_sseu_get_subslices(sseu, s)); for (ss = 0; ss < sseu->max_subslices; ss++) { u16 enabled_eus = sseu_get_eus(sseu, s, ss); @@ -183,44 +182,80 @@ static u16 compute_eu_total(const struct sseu_dev_info *sseu) return total; } +static void gen11_compute_sseu_info(struct sseu_dev_info *sseu, + u8 s_en, u32 ss_en, u16 eu_en) +{ + int s, ss; + + /* ss_en represents entire subslice mask across all slices */ + GEM_BUG_ON(sseu->max_slices * sseu->max_subslices > + sizeof(ss_en) * BITS_PER_BYTE); + + for (s = 0; s < sseu->max_slices; s++) { + if ((s_en & BIT(s)) == 0) + continue; + + sseu->slice_mask |= BIT(s); + + intel_sseu_set_subslices(sseu, s, ss_en); + + for (ss = 0; ss < sseu->max_subslices; ss++) + if (intel_sseu_has_subslice(sseu, s, ss)) + sseu_set_eus(sseu, s, ss, eu_en); + } + sseu->eu_per_subslice = hweight16(eu_en); + sseu->eu_total = compute_eu_total(sseu); +} + +static void gen12_sseu_info_init(struct drm_i915_private *dev_priv) +{ + struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; + u8 s_en; + u32 dss_en; + u16 eu_en = 0; + u8 eu_en_fuse; + int eu; + + /* + * Gen12 has Dual-Subslices, which behave similarly to 2 gen11 SS. + * Instead of splitting these, provide userspace with an array + * of DSS to more closely represent the hardware resource. 
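/*
 * Editor's illustrative sketch, not part of the patch: in the gen12 path
 * that follows, the EU fuse carries one bit per *pair* of EUs and is
 * expanded into a per-EU mask.  Standalone analogue of that expansion with
 * a made-up fuse value (the numbers are assumptions for demonstration).
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint8_t  eu_en_fuse = 0x2d; /* hypothetical fuse: pairs 0, 2, 3, 5 on */
    uint16_t eu_en = 0;
    int eu;

    for (eu = 0; eu < 8; eu++)  /* 16 EUs per subslice -> 8 fuse bits */
        if (eu_en_fuse & (1u << eu))
            eu_en |= (1u << (eu * 2)) | (1u << (eu * 2 + 1));

    printf("eu_en = 0x%04x\n", eu_en); /* prints 0x0cf3 */
    return 0;
}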
+ */ + intel_sseu_set_info(sseu, 1, 6, 16); + + s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; + + dss_en = I915_READ(GEN12_GT_DSS_ENABLE); + + /* one bit per pair of EUs */ + eu_en_fuse = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK); + for (eu = 0; eu < sseu->max_eus_per_subslice / 2; eu++) + if (eu_en_fuse & BIT(eu)) + eu_en |= BIT(eu * 2) | BIT(eu * 2 + 1); + + gen11_compute_sseu_info(sseu, s_en, dss_en, eu_en); + + /* TGL only supports slice-level power gating */ + sseu->has_slice_pg = 1; +} + static void gen11_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u8 s_en; - u32 ss_en, ss_en_mask; + u32 ss_en; u8 eu_en; - int s; - if (IS_ELKHARTLAKE(dev_priv)) { - sseu->max_slices = 1; - sseu->max_subslices = 4; - sseu->max_eus_per_subslice = 8; - } else { - sseu->max_slices = 1; - sseu->max_subslices = 8; - sseu->max_eus_per_subslice = 8; - } + if (IS_ELKHARTLAKE(dev_priv)) + intel_sseu_set_info(sseu, 1, 4, 8); + else + intel_sseu_set_info(sseu, 1, 8, 8); s_en = I915_READ(GEN11_GT_SLICE_ENABLE) & GEN11_GT_S_ENA_MASK; ss_en = ~I915_READ(GEN11_GT_SUBSLICE_DISABLE); - ss_en_mask = BIT(sseu->max_subslices) - 1; eu_en = ~(I915_READ(GEN11_EU_DISABLE) & GEN11_EU_DIS_MASK); - for (s = 0; s < sseu->max_slices; s++) { - if (s_en & BIT(s)) { - int ss_idx = sseu->max_subslices * s; - int ss; - - sseu->slice_mask |= BIT(s); - sseu->subslice_mask[s] = (ss_en >> ss_idx) & ss_en_mask; - for (ss = 0; ss < sseu->max_subslices; ss++) { - if (sseu->subslice_mask[s] & BIT(ss)) - sseu_set_eus(sseu, s, ss, eu_en); - } - } - } - sseu->eu_per_subslice = hweight8(eu_en); - sseu->eu_total = compute_eu_total(sseu); + gen11_compute_sseu_info(sseu, s_en, ss_en, eu_en); /* ICL has no power gating restrictions. */ sseu->has_slice_pg = 1; @@ -236,23 +271,10 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) const int eu_mask = 0xff; u32 subslice_mask, eu_en; + intel_sseu_set_info(sseu, 6, 4, 8); + sseu->slice_mask = (fuse2 & GEN10_F2_S_ENA_MASK) >> GEN10_F2_S_ENA_SHIFT; - sseu->max_slices = 6; - sseu->max_subslices = 4; - sseu->max_eus_per_subslice = 8; - - subslice_mask = (1 << 4) - 1; - subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> - GEN10_F2_SS_DIS_SHIFT); - - /* - * Slice0 can have up to 3 subslices, but there are only 2 in - * slice1/2. - */ - sseu->subslice_mask[0] = subslice_mask; - for (s = 1; s < sseu->max_slices; s++) - sseu->subslice_mask[s] = subslice_mask & 0x3; /* Slice0 */ eu_en = ~I915_READ(GEN8_EU_DISABLE0); @@ -277,14 +299,25 @@ static void gen10_sseu_info_init(struct drm_i915_private *dev_priv) eu_en = ~I915_READ(GEN10_EU_DISABLE3); sseu_set_eus(sseu, 5, 1, eu_en & eu_mask); - /* Do a second pass where we mark the subslices disabled if all their - * eus are off. - */ + subslice_mask = (1 << 4) - 1; + subslice_mask &= ~((fuse2 & GEN10_F2_SS_DIS_MASK) >> + GEN10_F2_SS_DIS_SHIFT); + for (s = 0; s < sseu->max_slices; s++) { + u32 subslice_mask_with_eus = subslice_mask; + for (ss = 0; ss < sseu->max_subslices; ss++) { if (sseu_get_eus(sseu, s, ss) == 0) - sseu->subslice_mask[s] &= ~BIT(ss); + subslice_mask_with_eus &= ~BIT(ss); } + + /* + * Slice0 can have up to 3 subslices, but there are only 2 in + * slice1/2. + */ + intel_sseu_set_subslices(sseu, s, s == 0 ? 
+ subslice_mask_with_eus : + subslice_mask_with_eus & 0x3); } sseu->eu_total = compute_eu_total(sseu); @@ -310,13 +343,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 fuse; + u8 subslice_mask = 0; fuse = I915_READ(CHV_FUSE_GT); sseu->slice_mask = BIT(0); - sseu->max_slices = 1; - sseu->max_subslices = 2; - sseu->max_eus_per_subslice = 8; + intel_sseu_set_info(sseu, 1, 2, 8); if (!(fuse & CHV_FGT_DISABLE_SS0)) { u8 disabled_mask = @@ -325,7 +357,7 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) (((fuse & CHV_FGT_EU_DIS_SS0_R1_MASK) >> CHV_FGT_EU_DIS_SS0_R1_SHIFT) << 4); - sseu->subslice_mask[0] |= BIT(0); + subslice_mask |= BIT(0); sseu_set_eus(sseu, 0, 0, ~disabled_mask); } @@ -336,10 +368,12 @@ static void cherryview_sseu_info_init(struct drm_i915_private *dev_priv) (((fuse & CHV_FGT_EU_DIS_SS1_R1_MASK) >> CHV_FGT_EU_DIS_SS1_R1_SHIFT) << 4); - sseu->subslice_mask[0] |= BIT(1); + subslice_mask |= BIT(1); sseu_set_eus(sseu, 0, 1, ~disabled_mask); } + intel_sseu_set_subslices(sseu, 0, subslice_mask); + sseu->eu_total = compute_eu_total(sseu); /* @@ -372,9 +406,8 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; /* BXT has a single slice and at most 3 subslices. */ - sseu->max_slices = IS_GEN9_LP(dev_priv) ? 1 : 3; - sseu->max_subslices = IS_GEN9_LP(dev_priv) ? 3 : 4; - sseu->max_eus_per_subslice = 8; + intel_sseu_set_info(sseu, IS_GEN9_LP(dev_priv) ? 1 : 3, + IS_GEN9_LP(dev_priv) ? 3 : 4, 8); /* * The subslice disable field is global, i.e. it applies @@ -393,14 +426,14 @@ static void gen9_sseu_info_init(struct drm_i915_private *dev_priv) /* skip disabled slice */ continue; - sseu->subslice_mask[s] = subslice_mask; + intel_sseu_set_subslices(sseu, s, subslice_mask); eu_disable = I915_READ(GEN9_EU_DISABLE(s)); for (ss = 0; ss < sseu->max_subslices; ss++) { int eu_per_ss; u8 eu_disabled_mask; - if (!(sseu->subslice_mask[s] & BIT(ss))) + if (!intel_sseu_has_subslice(sseu, s, ss)) /* skip disabled subslice */ continue; @@ -473,9 +506,7 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) fuse2 = I915_READ(GEN8_FUSE2); sseu->slice_mask = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; - sseu->max_slices = 3; - sseu->max_subslices = 3; - sseu->max_eus_per_subslice = 8; + intel_sseu_set_info(sseu, 3, 3, 8); /* * The subslice disable field is global, i.e. 
it applies @@ -502,13 +533,13 @@ static void broadwell_sseu_info_init(struct drm_i915_private *dev_priv) /* skip disabled slice */ continue; - sseu->subslice_mask[s] = subslice_mask; + intel_sseu_set_subslices(sseu, s, subslice_mask); for (ss = 0; ss < sseu->max_subslices; ss++) { u8 eu_disabled_mask; u32 n_disabled; - if (!(sseu->subslice_mask[s] & BIT(ss))) + if (!intel_sseu_has_subslice(sseu, s, ss)) /* skip disabled subslice */ continue; @@ -552,6 +583,7 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) { struct sseu_dev_info *sseu = &RUNTIME_INFO(dev_priv)->sseu; u32 fuse1; + u8 subslice_mask = 0; int s, ss; /* @@ -564,22 +596,18 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) /* fall through */ case 1: sseu->slice_mask = BIT(0); - sseu->subslice_mask[0] = BIT(0); + subslice_mask = BIT(0); break; case 2: sseu->slice_mask = BIT(0); - sseu->subslice_mask[0] = BIT(0) | BIT(1); + subslice_mask = BIT(0) | BIT(1); break; case 3: sseu->slice_mask = BIT(0) | BIT(1); - sseu->subslice_mask[0] = BIT(0) | BIT(1); - sseu->subslice_mask[1] = BIT(0) | BIT(1); + subslice_mask = BIT(0) | BIT(1); break; } - sseu->max_slices = hweight8(sseu->slice_mask); - sseu->max_subslices = hweight8(sseu->subslice_mask[0]); - fuse1 = I915_READ(HSW_PAVP_FUSE1); switch ((fuse1 & HSW_F1_EU_DIS_MASK) >> HSW_F1_EU_DIS_SHIFT) { default: @@ -596,9 +624,14 @@ static void haswell_sseu_info_init(struct drm_i915_private *dev_priv) sseu->eu_per_subslice = 6; break; } - sseu->max_eus_per_subslice = sseu->eu_per_subslice; + + intel_sseu_set_info(sseu, hweight8(sseu->slice_mask), + hweight8(subslice_mask), + sseu->eu_per_subslice); for (s = 0; s < sseu->max_slices; s++) { + intel_sseu_set_subslices(sseu, s, subslice_mask); + for (ss = 0; ss < sseu->max_subslices; ss++) { sseu_set_eus(sseu, s, ss, (1UL << sseu->eu_per_subslice) - 1); @@ -900,12 +933,8 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) runtime->num_sprites[pipe] = 1; } - if (i915_modparams.disable_display) { - DRM_INFO("Display disabled (module parameter)\n"); - info->num_pipes = 0; - } else if (HAS_DISPLAY(dev_priv) && - (IS_GEN_RANGE(dev_priv, 7, 8)) && - HAS_PCH_SPLIT(dev_priv)) { + if (HAS_DISPLAY(dev_priv) && IS_GEN_RANGE(dev_priv, 7, 8) && + HAS_PCH_SPLIT(dev_priv)) { u32 fuse_strap = I915_READ(FUSE_STRAP); u32 sfuse_strap = I915_READ(SFUSE_STRAP); @@ -923,14 +952,14 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) (HAS_PCH_CPT(dev_priv) && !(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) { DRM_INFO("Display fused off, disabling\n"); - info->num_pipes = 0; + info->pipe_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { DRM_INFO("PipeC fused off\n"); - info->num_pipes -= 1; + info->pipe_mask &= ~BIT(PIPE_C); } } else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) { u32 dfsm = I915_READ(SKL_DFSM); - u8 enabled_mask = BIT(info->num_pipes) - 1; + u8 enabled_mask = info->pipe_mask; if (dfsm & SKL_DFSM_PIPE_A_DISABLE) enabled_mask &= ~BIT(PIPE_A); @@ -951,7 +980,20 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n", enabled_mask); else - info->num_pipes = hweight8(enabled_mask); + info->pipe_mask = enabled_mask; + + if (dfsm & SKL_DFSM_DISPLAY_HDCP_DISABLE) + info->display.has_hdcp = 0; + + if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) + info->display.has_fbc = 0; + + if (INTEL_GEN(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) + info->display.has_csr = 0; + + if (INTEL_GEN(dev_priv) 
>= 10 && + (dfsm & CNL_DFSM_DISPLAY_DSC_DISABLE)) + info->display.has_dsc = 0; } /* Initialize slice/subslice/EU info */ @@ -965,8 +1007,10 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) gen9_sseu_info_init(dev_priv); else if (IS_GEN(dev_priv, 10)) gen10_sseu_info_init(dev_priv); - else if (INTEL_GEN(dev_priv) >= 11) + else if (IS_GEN(dev_priv, 11)) gen11_sseu_info_init(dev_priv); + else if (INTEL_GEN(dev_priv) >= 12) + gen12_sseu_info_init(dev_priv); if (IS_GEN(dev_priv, 6) && intel_vtd_active()) { DRM_INFO("Disabling ppGTT for VT-d support\n"); @@ -1010,8 +1054,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) GEN11_GT_VEBOX_DISABLE_SHIFT; for (i = 0; i < I915_MAX_VCS; i++) { - if (!HAS_ENGINE(dev_priv, _VCS(i))) + if (!HAS_ENGINE(dev_priv, _VCS(i))) { + vdbox_mask &= ~BIT(i); continue; + } if (!(BIT(i) & vdbox_mask)) { info->engine_mask &= ~BIT(_VCS(i)); @@ -1032,8 +1078,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv) GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv)); for (i = 0; i < I915_MAX_VECS; i++) { - if (!HAS_ENGINE(dev_priv, _VECS(i))) + if (!HAS_ENGINE(dev_priv, _VECS(i))) { + vebox_mask &= ~BIT(i); continue; + } if (!(BIT(i) & vebox_mask)) { info->engine_mask &= ~BIT(_VECS(i)); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 92e0c2e0954c..4bdf8a6cfb47 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -107,6 +107,7 @@ enum intel_ppgtt_type { func(is_mobile); \ func(is_lp); \ func(require_force_probe); \ + func(is_dgfx); \ /* Keep has_* in alphabetical order */ \ func(has_64bit_reloc); \ func(gpu_reset_clobbers_display); \ @@ -135,8 +136,11 @@ enum intel_ppgtt_type { func(has_csr); \ func(has_ddi); \ func(has_dp_mst); \ + func(has_dsb); \ + func(has_dsc); \ func(has_fbc); \ func(has_gmch); \ + func(has_hdcp); \ func(has_hotplug); \ func(has_ipc); \ func(has_modular_fia); \ @@ -159,9 +163,11 @@ struct intel_device_info { unsigned int page_sizes; /* page sizes supported by the HW */ + u32 memory_regions; /* regions supported by the HW */ + u32 display_mmio_offset; - u8 num_pipes; + u8 pipe_mask; #define DEFINE_FLAG(name) u8 name:1 DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG); diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c new file mode 100644 index 000000000000..baaeaecc64af --- /dev/null +++ b/drivers/gpu/drm/i915/intel_memory_region.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "intel_memory_region.h" +#include "i915_drv.h" + +/* XXX: Hysterical raisins. BIT(inst) needs to just be (inst) at some point. 
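/*
 * Editor's illustrative sketch, not part of the patch: the REGION_MAP
 * encoding defined just below stores "type | instance" as two one-hot
 * bitfields - BIT(type + INTEL_MEMORY_TYPE_SHIFT) | BIT(instance) - and the
 * header's MEMORY_TYPE/INSTANCE_FROM_REGION macros recover them with ilog2.
 * Standalone analogue of that round trip (the helper name is made up).
 */
#include <stdint.h>
#include <stdio.h>

#define TYPE_SHIFT 16

static unsigned int my_ilog2(uint32_t v)
{
    unsigned int r = 0;

    while (v >>= 1)
        r++;
    return r;
}

int main(void)
{
    unsigned int type = 1, inst = 0; /* e.g. "local" memory, instance 0 */
    uint32_t id = (1u << (type + TYPE_SHIFT)) | (1u << inst);

    printf("type=%u instance=%u\n",
           my_ilog2(id >> TYPE_SHIFT), my_ilog2(id & 0xffff));
    return 0;
}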
*/ +#define REGION_MAP(type, inst) \ + BIT((type) + INTEL_MEMORY_TYPE_SHIFT) | BIT(inst) + +const u32 intel_region_map[] = { + [INTEL_REGION_SMEM] = REGION_MAP(INTEL_MEMORY_SYSTEM, 0), + [INTEL_REGION_LMEM] = REGION_MAP(INTEL_MEMORY_LOCAL, 0), + [INTEL_REGION_STOLEN] = REGION_MAP(INTEL_MEMORY_STOLEN, 0), +}; + +static u64 +intel_memory_region_free_pages(struct intel_memory_region *mem, + struct list_head *blocks) +{ + struct i915_buddy_block *block, *on; + u64 size = 0; + + list_for_each_entry_safe(block, on, blocks, link) { + size += i915_buddy_block_size(&mem->mm, block); + i915_buddy_free(&mem->mm, block); + } + INIT_LIST_HEAD(blocks); + + return size; +} + +void +__intel_memory_region_put_pages_buddy(struct intel_memory_region *mem, + struct list_head *blocks) +{ + mutex_lock(&mem->mm_lock); + intel_memory_region_free_pages(mem, blocks); + mutex_unlock(&mem->mm_lock); +} + +void +__intel_memory_region_put_block_buddy(struct i915_buddy_block *block) +{ + struct list_head blocks; + + INIT_LIST_HEAD(&blocks); + list_add(&block->link, &blocks); + __intel_memory_region_put_pages_buddy(block->private, &blocks); +} + +int +__intel_memory_region_get_pages_buddy(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags, + struct list_head *blocks) +{ + unsigned int min_order = 0; + unsigned long n_pages; + + GEM_BUG_ON(!IS_ALIGNED(size, mem->mm.chunk_size)); + GEM_BUG_ON(!list_empty(blocks)); + + if (flags & I915_ALLOC_MIN_PAGE_SIZE) { + min_order = ilog2(mem->min_page_size) - + ilog2(mem->mm.chunk_size); + } + + if (flags & I915_ALLOC_CONTIGUOUS) { + size = roundup_pow_of_two(size); + min_order = ilog2(size) - ilog2(mem->mm.chunk_size); + } + + n_pages = size >> ilog2(mem->mm.chunk_size); + + mutex_lock(&mem->mm_lock); + + do { + struct i915_buddy_block *block; + unsigned int order; + + order = fls(n_pages) - 1; + GEM_BUG_ON(order > mem->mm.max_order); + GEM_BUG_ON(order < min_order); + + do { + block = i915_buddy_alloc(&mem->mm, order); + if (!IS_ERR(block)) + break; + + if (order-- == min_order) + goto err_free_blocks; + } while (1); + + n_pages -= BIT(order); + + block->private = mem; + list_add(&block->link, blocks); + + if (!n_pages) + break; + } while (1); + + mutex_unlock(&mem->mm_lock); + return 0; + +err_free_blocks: + intel_memory_region_free_pages(mem, blocks); + mutex_unlock(&mem->mm_lock); + return -ENXIO; +} + +struct i915_buddy_block * +__intel_memory_region_get_block_buddy(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags) +{ + struct i915_buddy_block *block; + LIST_HEAD(blocks); + int ret; + + ret = __intel_memory_region_get_pages_buddy(mem, size, flags, &blocks); + if (ret) + return ERR_PTR(ret); + + block = list_first_entry(&blocks, typeof(*block), link); + list_del_init(&block->link); + return block; +} + +int intel_memory_region_init_buddy(struct intel_memory_region *mem) +{ + return i915_buddy_init(&mem->mm, resource_size(&mem->region), + PAGE_SIZE); +} + +void intel_memory_region_release_buddy(struct intel_memory_region *mem) +{ + i915_buddy_fini(&mem->mm); +} + +struct intel_memory_region * +intel_memory_region_create(struct drm_i915_private *i915, + resource_size_t start, + resource_size_t size, + resource_size_t min_page_size, + resource_size_t io_start, + const struct intel_memory_region_ops *ops) +{ + struct intel_memory_region *mem; + int err; + + mem = kzalloc(sizeof(*mem), GFP_KERNEL); + if (!mem) + return ERR_PTR(-ENOMEM); + + mem->i915 = i915; + mem->region = (struct resource)DEFINE_RES_MEM(start, size); + 
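/*
 * Editor's illustrative sketch, not part of the patch: the buddy helper
 * above carves a request into power-of-two blocks, always trying the
 * largest order that still fits (fls(n_pages) - 1) and falling back to
 * smaller orders on failure.  Standalone analogue that only prints the
 * greedy split for a made-up request size; fls() is open-coded here.
 */
#include <stdio.h>

static int my_fls(unsigned long v)
{
    int r = 0;

    while (v) {
        v >>= 1;
        r++;
    }
    return r;
}

int main(void)
{
    unsigned long n_pages = 13; /* hypothetical request size, in chunks */

    while (n_pages) {
        int order = my_fls(n_pages) - 1;

        printf("allocate order-%d block (%lu pages)\n", order, 1ul << order);
        n_pages -= 1ul << order;
    }
    return 0; /* 13 pages -> one order-3, one order-2 and one order-0 block */
}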
mem->io_start = io_start; + mem->min_page_size = min_page_size; + mem->ops = ops; + + mutex_init(&mem->objects.lock); + INIT_LIST_HEAD(&mem->objects.list); + INIT_LIST_HEAD(&mem->objects.purgeable); + + mutex_init(&mem->mm_lock); + + if (ops->init) { + err = ops->init(mem); + if (err) + goto err_free; + } + + kref_init(&mem->kref); + return mem; + +err_free: + kfree(mem); + return ERR_PTR(err); +} + +static void __intel_memory_region_destroy(struct kref *kref) +{ + struct intel_memory_region *mem = + container_of(kref, typeof(*mem), kref); + + if (mem->ops->release) + mem->ops->release(mem); + + mutex_destroy(&mem->mm_lock); + mutex_destroy(&mem->objects.lock); + kfree(mem); +} + +struct intel_memory_region * +intel_memory_region_get(struct intel_memory_region *mem) +{ + kref_get(&mem->kref); + return mem; +} + +void intel_memory_region_put(struct intel_memory_region *mem) +{ + kref_put(&mem->kref, __intel_memory_region_destroy); +} + +/* Global memory region registration -- only slight layer inversions! */ + +int intel_memory_regions_hw_probe(struct drm_i915_private *i915) +{ + int err, i; + + for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) { + struct intel_memory_region *mem = ERR_PTR(-ENODEV); + u32 type; + + if (!HAS_REGION(i915, BIT(i))) + continue; + + type = MEMORY_TYPE_FROM_REGION(intel_region_map[i]); + switch (type) { + case INTEL_MEMORY_SYSTEM: + mem = i915_gem_shmem_setup(i915); + break; + case INTEL_MEMORY_STOLEN: + mem = i915_gem_stolen_setup(i915); + break; + case INTEL_MEMORY_LOCAL: + mem = intel_setup_fake_lmem(i915); + break; + } + + if (IS_ERR(mem)) { + err = PTR_ERR(mem); + DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type); + goto out_cleanup; + } + + mem->id = intel_region_map[i]; + mem->type = type; + mem->instance = MEMORY_INSTANCE_FROM_REGION(intel_region_map[i]); + + i915->mm.regions[i] = mem; + } + + return 0; + +out_cleanup: + intel_memory_regions_driver_release(i915); + return err; +} + +void intel_memory_regions_driver_release(struct drm_i915_private *i915) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) { + struct intel_memory_region *region = + fetch_and_zero(&i915->mm.regions[i]); + + if (region) + intel_memory_region_put(region); + } +} + +#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) +#include "selftests/intel_memory_region.c" +#include "selftests/mock_region.c" +#endif diff --git a/drivers/gpu/drm/i915/intel_memory_region.h b/drivers/gpu/drm/i915/intel_memory_region.h new file mode 100644 index 000000000000..238722009677 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_memory_region.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_MEMORY_REGION_H__ +#define __INTEL_MEMORY_REGION_H__ + +#include <linux/kref.h> +#include <linux/ioport.h> +#include <linux/mutex.h> +#include <linux/io-mapping.h> +#include <drm/drm_mm.h> + +#include "i915_buddy.h" + +struct drm_i915_private; +struct drm_i915_gem_object; +struct intel_memory_region; +struct sg_table; + +/** + * Base memory type + */ +enum intel_memory_type { + INTEL_MEMORY_SYSTEM = 0, + INTEL_MEMORY_LOCAL, + INTEL_MEMORY_STOLEN, +}; + +enum intel_region_id { + INTEL_REGION_SMEM = 0, + INTEL_REGION_LMEM, + INTEL_REGION_STOLEN, + INTEL_REGION_UNKNOWN, /* Should be last */ +}; + +#define REGION_SMEM BIT(INTEL_REGION_SMEM) +#define REGION_LMEM BIT(INTEL_REGION_LMEM) +#define REGION_STOLEN BIT(INTEL_REGION_STOLEN) + +#define INTEL_MEMORY_TYPE_SHIFT 16 + +#define MEMORY_TYPE_FROM_REGION(r) (ilog2((r) >> 
INTEL_MEMORY_TYPE_SHIFT)) +#define MEMORY_INSTANCE_FROM_REGION(r) (ilog2((r) & 0xffff)) + +#define I915_ALLOC_MIN_PAGE_SIZE BIT(0) +#define I915_ALLOC_CONTIGUOUS BIT(1) + +/** + * Memory regions encoded as type | instance + */ +extern const u32 intel_region_map[]; + +struct intel_memory_region_ops { + unsigned int flags; + + int (*init)(struct intel_memory_region *mem); + void (*release)(struct intel_memory_region *mem); + + struct drm_i915_gem_object * + (*create_object)(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags); +}; + +struct intel_memory_region { + struct drm_i915_private *i915; + + const struct intel_memory_region_ops *ops; + + struct io_mapping iomap; + struct resource region; + + /* For fake LMEM */ + struct drm_mm_node fake_mappable; + + struct i915_buddy_mm mm; + struct mutex mm_lock; + + struct kref kref; + + resource_size_t io_start; + resource_size_t min_page_size; + + unsigned int type; + unsigned int instance; + unsigned int id; + + dma_addr_t remap_addr; + + struct { + struct mutex lock; /* Protects access to objects */ + struct list_head list; + struct list_head purgeable; + } objects; +}; + +int intel_memory_region_init_buddy(struct intel_memory_region *mem); +void intel_memory_region_release_buddy(struct intel_memory_region *mem); + +int __intel_memory_region_get_pages_buddy(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags, + struct list_head *blocks); +struct i915_buddy_block * +__intel_memory_region_get_block_buddy(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags); +void __intel_memory_region_put_pages_buddy(struct intel_memory_region *mem, + struct list_head *blocks); +void __intel_memory_region_put_block_buddy(struct i915_buddy_block *block); + +struct intel_memory_region * +intel_memory_region_create(struct drm_i915_private *i915, + resource_size_t start, + resource_size_t size, + resource_size_t min_page_size, + resource_size_t io_start, + const struct intel_memory_region_ops *ops); + +struct intel_memory_region * +intel_memory_region_get(struct intel_memory_region *mem); +void intel_memory_region_put(struct intel_memory_region *mem); + +int intel_memory_regions_hw_probe(struct drm_i915_private *i915); +void intel_memory_regions_driver_release(struct drm_i915_private *i915); + +#endif diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c index 15f8bff141f9..8fd92b9130a7 100644 --- a/drivers/gpu/drm/i915/intel_pch.c +++ b/drivers/gpu/drm/i915/intel_pch.c @@ -52,7 +52,8 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) return PCH_SPT; case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); - WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); + WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && + !IS_COFFEELAKE(dev_priv)); return PCH_SPT; case INTEL_PCH_KBP_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n"); @@ -74,12 +75,16 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) WARN_ON(!IS_COFFEELAKE(dev_priv)); /* CometPoint is CNP Compatible */ return PCH_CNP; + case INTEL_PCH_CMP_V_DEVICE_ID_TYPE: + DRM_DEBUG_KMS("Found Comet Lake V PCH (CMP-V)\n"); + WARN_ON(!IS_COFFEELAKE(dev_priv)); + /* Comet Lake V PCH is based on KBP, which is SPT compatible */ + return PCH_SPT; case INTEL_PCH_ICP_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found Ice Lake PCH\n"); WARN_ON(!IS_ICELAKE(dev_priv)); return PCH_ICP; case INTEL_PCH_MCC_DEVICE_ID_TYPE: - case 
INTEL_PCH_MCC2_DEVICE_ID_TYPE: DRM_DEBUG_KMS("Found Mule Creek Canyon PCH\n"); WARN_ON(!IS_ELKHARTLAKE(dev_priv)); return PCH_MCC; @@ -87,6 +92,11 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) DRM_DEBUG_KMS("Found Tiger Lake LP PCH\n"); WARN_ON(!IS_TIGERLAKE(dev_priv)); return PCH_TGP; + case INTEL_PCH_JSP_DEVICE_ID_TYPE: + case INTEL_PCH_JSP2_DEVICE_ID_TYPE: + DRM_DEBUG_KMS("Found Jasper Lake PCH\n"); + WARN_ON(!IS_ELKHARTLAKE(dev_priv)); + return PCH_JSP; default: return PCH_NONE; } diff --git a/drivers/gpu/drm/i915/intel_pch.h b/drivers/gpu/drm/i915/intel_pch.h index c29c81ec7971..d26c25dd8d54 100644 --- a/drivers/gpu/drm/i915/intel_pch.h +++ b/drivers/gpu/drm/i915/intel_pch.h @@ -23,6 +23,7 @@ enum intel_pch { PCH_SPT, /* Sunrisepoint/Kaby Lake PCH */ PCH_CNP, /* Cannon/Comet Lake PCH */ PCH_ICP, /* Ice Lake PCH */ + PCH_JSP, /* Jasper Lake PCH */ PCH_MCC, /* Mule Creek Canyon PCH */ PCH_TGP, /* Tiger Lake PCH */ }; @@ -42,16 +43,19 @@ enum intel_pch { #define INTEL_PCH_CNP_LP_DEVICE_ID_TYPE 0x9D80 #define INTEL_PCH_CMP_DEVICE_ID_TYPE 0x0280 #define INTEL_PCH_CMP2_DEVICE_ID_TYPE 0x0680 +#define INTEL_PCH_CMP_V_DEVICE_ID_TYPE 0xA380 #define INTEL_PCH_ICP_DEVICE_ID_TYPE 0x3480 #define INTEL_PCH_MCC_DEVICE_ID_TYPE 0x4B00 -#define INTEL_PCH_MCC2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_TGP_DEVICE_ID_TYPE 0xA080 +#define INTEL_PCH_JSP_DEVICE_ID_TYPE 0x4D80 +#define INTEL_PCH_JSP2_DEVICE_ID_TYPE 0x3880 #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 #define INTEL_PCH_P3X_DEVICE_ID_TYPE 0x7000 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id) +#define HAS_PCH_JSP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_JSP) #define HAS_PCH_MCC(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_MCC) #define HAS_PCH_TGP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_TGP) #define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 2efe1d12d5a9..809bff955b5a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -25,7 +25,6 @@ * */ -#include <linux/cpufreq.h> #include <linux/module.h> #include <linux/pm_runtime.h> @@ -38,6 +37,8 @@ #include "display/intel_fbc.h" #include "display/intel_sprite.h" +#include "gt/intel_llc.h" + #include "i915_drv.h" #include "i915_irq.h" #include "i915_trace.h" @@ -45,26 +46,6 @@ #include "intel_sideband.h" #include "../../../platform/x86/intel_ips.h" -/** - * DOC: RC6 - * - * RC6 is a special power stage which allows the GPU to enter an very - * low-voltage mode when idle, using down to 0V while at this stage. This - * stage is entered automatically when the GPU is idle when RC6 support is - * enabled, and as soon as new workload arises GPU wakes up automatically as well. - * - * There are different RC6 modes available in Intel GPU, which differentiate - * among each other with the latency required to enter and leave RC6 and - * voltage consumed by the GPU in different states. - * - * The combination of the following flags define which states GPU is allowed - * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and - * RC6pp is deepest RC6. Their support by hardware varies according to the - * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one - * which brings the most power savings; deeper states save more power, but - * require higher latency to switch to and wake up. 
- */ - static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) { if (HAS_LLC(dev_priv)) { @@ -224,8 +205,6 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv) break; } - dev_priv->ips.r_t = dev_priv->mem_freq; - switch (csipll & 0x3ff) { case 0x00c: dev_priv->fsb_freq = 3200; @@ -254,14 +233,6 @@ static void i915_ironlake_get_mem_freq(struct drm_i915_private *dev_priv) dev_priv->fsb_freq = 0; break; } - - if (dev_priv->fsb_freq == 3200) { - dev_priv->ips.c_m = 0; - } else if (dev_priv->fsb_freq > 3200 && dev_priv->fsb_freq <= 4800) { - dev_priv->ips.c_m = 1; - } else { - dev_priv->ips.c_m = 2; - } } static const struct cxsr_latency cxsr_latency_table[] = { @@ -1145,10 +1116,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; - if (plane->id == PLANE_CURSOR) - width = plane_state->base.crtc_w; - else - width = drm_rect_width(&plane_state->base.dst); + width = drm_rect_width(&plane_state->base.dst); if (plane->id == PLANE_CURSOR) { wm = intel_wm_method2(clock, htotal, width, cpp, latency); @@ -1335,8 +1303,8 @@ static int g4x_compute_pipe_wm(struct intel_crtc_state *crtc_state) struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state); struct g4x_wm_state *wm_state = &crtc_state->wm.g4x.optimal; - int num_active_planes = hweight32(crtc_state->active_planes & - ~BIT(PLANE_CURSOR)); + int num_active_planes = hweight8(crtc_state->active_planes & + ~BIT(PLANE_CURSOR)); const struct g4x_pipe_wm *raw; const struct intel_plane_state *old_plane_state; const struct intel_plane_state *new_plane_state; @@ -1498,7 +1466,7 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv, struct g4x_wm_values *wm) { struct intel_crtc *crtc; - int num_active_crtcs = 0; + int num_active_pipes = 0; wm->cxsr = true; wm->hpll_en = true; @@ -1517,10 +1485,10 @@ static void g4x_merge_wm(struct drm_i915_private *dev_priv, if (!wm_state->fbc_en) wm->fbc_en = false; - num_active_crtcs++; + num_active_pipes++; } - if (num_active_crtcs != 1) { + if (num_active_pipes != 1) { wm->cxsr = false; wm->hpll_en = false; wm->fbc_en = false; @@ -1667,7 +1635,7 @@ static int vlv_compute_fifo(struct intel_crtc_state *crtc_state) &crtc_state->wm.vlv.raw[VLV_WM_LEVEL_PM2]; struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; unsigned int active_planes = crtc_state->active_planes & ~BIT(PLANE_CURSOR); - int num_active_planes = hweight32(active_planes); + int num_active_planes = hweight8(active_planes); const int fifo_size = 511; int fifo_extra, fifo_left = fifo_size; int sprite0_fifo_extra = 0; @@ -1856,8 +1824,8 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) struct vlv_wm_state *wm_state = &crtc_state->wm.vlv.optimal; const struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state; - int num_active_planes = hweight32(crtc_state->active_planes & - ~BIT(PLANE_CURSOR)); + int num_active_planes = hweight8(crtc_state->active_planes & + ~BIT(PLANE_CURSOR)); bool needs_modeset = drm_atomic_crtc_needs_modeset(&crtc_state->base); const struct intel_plane_state *old_plane_state; const struct intel_plane_state *new_plane_state; @@ -1917,7 +1885,7 @@ static int vlv_compute_pipe_wm(struct intel_crtc_state *crtc_state) for (level = 0; level < wm_state->num_levels; level++) { const struct g4x_pipe_wm *raw = &crtc_state->wm.vlv.raw[level]; - const int sr_fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1; + const int sr_fifo_size = 
INTEL_NUM_PIPES(dev_priv) * 512 - 1; if (!vlv_raw_crtc_wm_is_valid(crtc_state, level)) break; @@ -2106,7 +2074,7 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, struct vlv_wm_values *wm) { struct intel_crtc *crtc; - int num_active_crtcs = 0; + int num_active_pipes = 0; wm->level = dev_priv->wm.max_level; wm->cxsr = true; @@ -2120,14 +2088,14 @@ static void vlv_merge_wm(struct drm_i915_private *dev_priv, if (!wm_state->cxsr) wm->cxsr = false; - num_active_crtcs++; + num_active_pipes++; wm->level = min_t(int, wm->level, wm_state->num_levels - 1); } - if (num_active_crtcs != 1) + if (num_active_pipes != 1) wm->cxsr = false; - if (num_active_crtcs > 1) + if (num_active_pipes > 1) wm->level = VLV_WM_LEVEL_PM2; for_each_intel_crtc(&dev_priv->drm, crtc) { @@ -2577,7 +2545,8 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, return ilk_wm_method2(crtc_state->pixel_rate, crtc_state->base.adjusted_mode.crtc_htotal, - plane_state->base.crtc_w, cpp, mem_value); + drm_rect_width(&plane_state->base.dst), + cpp, mem_value); } /* Only for WM_LP. */ @@ -2656,7 +2625,7 @@ static unsigned int ilk_plane_wm_max(const struct drm_i915_private *dev_priv, /* HSW allows LP1+ watermarks even with multiple pipes */ if (level == 0 || config->num_pipes_active > 1) { - fifo_size /= INTEL_INFO(dev_priv)->num_pipes; + fifo_size /= INTEL_NUM_PIPES(dev_priv); /* * For some reason the non self refresh @@ -3117,8 +3086,8 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) struct intel_pipe_wm *pipe_wm; struct drm_device *dev = state->dev; const struct drm_i915_private *dev_priv = to_i915(dev); - struct drm_plane *plane; - const struct drm_plane_state *plane_state; + struct intel_plane *plane; + const struct intel_plane_state *plane_state; const struct intel_plane_state *pristate = NULL; const struct intel_plane_state *sprstate = NULL; const struct intel_plane_state *curstate = NULL; @@ -3127,15 +3096,13 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state) pipe_wm = &crtc_state->wm.ilk.optimal; - drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, &crtc_state->base) { - const struct intel_plane_state *ps = to_intel_plane_state(plane_state); - - if (plane->type == DRM_PLANE_TYPE_PRIMARY) - pristate = ps; - else if (plane->type == DRM_PLANE_TYPE_OVERLAY) - sprstate = ps; - else if (plane->type == DRM_PLANE_TYPE_CURSOR) - curstate = ps; + intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { + if (plane->base.type == DRM_PLANE_TYPE_PRIMARY) + pristate = plane_state; + else if (plane->base.type == DRM_PLANE_TYPE_OVERLAY) + sprstate = plane_state; + else if (plane->base.type == DRM_PLANE_TYPE_CURSOR) + curstate = plane_state; } pipe_wm->pipe_enabled = crtc_state->base.active; @@ -3662,10 +3629,47 @@ static bool skl_needs_memory_bw_wa(struct drm_i915_private *dev_priv) static bool intel_has_sagv(struct drm_i915_private *dev_priv) { + /* HACK! 
*/ + if (IS_GEN(dev_priv, 12)) + return false; + return (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) && dev_priv->sagv_status != I915_SAGV_NOT_CONTROLLED; } +static void +skl_setup_sagv_block_time(struct drm_i915_private *dev_priv) +{ + if (INTEL_GEN(dev_priv) >= 12) { + u32 val = 0; + int ret; + + ret = sandybridge_pcode_read(dev_priv, + GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, + &val, NULL); + if (!ret) { + dev_priv->sagv_block_time_us = val; + return; + } + + DRM_DEBUG_DRIVER("Couldn't read SAGV block time!\n"); + } else if (IS_GEN(dev_priv, 11)) { + dev_priv->sagv_block_time_us = 10; + return; + } else if (IS_GEN(dev_priv, 10)) { + dev_priv->sagv_block_time_us = 20; + return; + } else if (IS_GEN(dev_priv, 9)) { + dev_priv->sagv_block_time_us = 30; + return; + } else { + MISSING_CASE(INTEL_GEN(dev_priv)); + } + + /* Default to an unusable block time */ + dev_priv->sagv_block_time_us = -1; +} + /* * SAGV dynamically adjusts the system agent voltage and clock frequencies * depending on power and performance requirements. The display engine access @@ -3754,33 +3758,25 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) struct intel_crtc_state *crtc_state; enum pipe pipe; int level, latency; - int sagv_block_time_us; if (!intel_has_sagv(dev_priv)) return false; - if (IS_GEN(dev_priv, 9)) - sagv_block_time_us = 30; - else if (IS_GEN(dev_priv, 10)) - sagv_block_time_us = 20; - else - sagv_block_time_us = 10; - /* * If there are no active CRTCs, no additional checks need be performed */ - if (hweight32(state->active_crtcs) == 0) + if (hweight8(state->active_pipes) == 0) return true; /* * SKL+ workaround: bspec recommends we disable SAGV when we have * more then one pipe enabled */ - if (hweight32(state->active_crtcs) > 1) + if (hweight8(state->active_pipes) > 1) return false; /* Since we're now guaranteed to only have one active CRTC... */ - pipe = ffs(state->active_crtcs) - 1; + pipe = ffs(state->active_pipes) - 1; crtc = intel_get_crtc_for_pipe(dev_priv, pipe); crtc_state = to_intel_crtc_state(crtc->base.state); @@ -3812,7 +3808,7 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state) * incur memory latencies higher than sagv_block_time_us we * can't enable SAGV. 
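/*
 * Editor's illustrative sketch, not part of the patch: SAGV may block
 * memory for up to sagv_block_time_us, so it is only left enabled when the
 * highest enabled watermark level tolerates at least that much latency,
 * which is the check made just below.  Standalone analogue with made-up
 * per-level latencies.
 */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
    const unsigned int sagv_block_time_us = 20;          /* e.g. a gen10-like value */
    const unsigned int latency_us[]  = { 2, 5, 9, 35 };  /* per wm level, made up   */
    const bool level_enabled[]       = { true, true, true, false };
    int level;

    /* find the highest enabled watermark level */
    for (level = 3; level > 0; level--)
        if (level_enabled[level])
            break;

    printf("SAGV %s\n", latency_us[level] < sagv_block_time_us ?
           "must stay disabled" : "can be enabled");
    return 0;
}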
*/ - if (latency < sagv_block_time_us) + if (latency < dev_priv->sagv_block_time_us) return false; } @@ -3875,14 +3871,14 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv, if (WARN_ON(!state) || !crtc_state->base.active) { alloc->start = 0; alloc->end = 0; - *num_active = hweight32(dev_priv->active_crtcs); + *num_active = hweight8(dev_priv->active_pipes); return; } if (intel_state->active_pipe_changes) - *num_active = hweight32(intel_state->active_crtcs); + *num_active = hweight8(intel_state->active_pipes); else - *num_active = hweight32(dev_priv->active_crtcs); + *num_active = hweight8(dev_priv->active_pipes); ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate, *num_active, ddb); @@ -4013,7 +4009,8 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, val = I915_READ(PLANE_BUF_CFG(pipe, plane_id)); val2 = I915_READ(PLANE_NV12_BUF_CFG(pipe, plane_id)); - if (is_planar_yuv_format(fourcc)) + if (fourcc && + drm_format_info_is_yuv_semiplanar(drm_format_info(fourcc))) swap(val, val2); skl_ddb_entry_init_from_hw(dev_priv, ddb_y, val); @@ -4071,7 +4068,6 @@ static uint_fixed_16_16_t skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); u32 src_w, src_h, dst_w, dst_h; uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; uint_fixed_16_16_t downscale_h, downscale_w; @@ -4079,27 +4075,17 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, if (WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state))) return u32_to_fixed16(0); - /* n.b., src is 16.16 fixed point, dst is whole integer */ - if (plane->id == PLANE_CURSOR) { - /* - * Cursors only support 0/180 degree rotation, - * hence no need to account for rotation here. - */ - src_w = plane_state->base.src_w >> 16; - src_h = plane_state->base.src_h >> 16; - dst_w = plane_state->base.crtc_w; - dst_h = plane_state->base.crtc_h; - } else { - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. - */ - src_w = drm_rect_width(&plane_state->base.src) >> 16; - src_h = drm_rect_height(&plane_state->base.src) >> 16; - dst_w = drm_rect_width(&plane_state->base.dst); - dst_h = drm_rect_height(&plane_state->base.dst); - } + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. + * + * n.b., src is 16.16 fixed point, dst is whole integer. 
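/*
 * Editor's illustrative sketch, not part of the patch: the downscale amount
 * computed below works in 16.16 fixed point - the width and height ratios
 * are clamped to at least 1.0 and then multiplied.  Standalone analogue
 * with simple fixed-point helpers (names and sizes are assumptions).
 */
#include <stdint.h>
#include <stdio.h>

#define FP_ONE (1u << 16) /* 1.0 in 16.16 fixed point */

static uint32_t fp_div(uint32_t a, uint32_t b) /* integer a/b as 16.16 */
{
    return (uint32_t)(((uint64_t)a << 16) / b);
}

static uint32_t fp_mul(uint32_t a, uint32_t b) /* 16.16 times 16.16 */
{
    return (uint32_t)(((uint64_t)a * b) >> 16);
}

int main(void)
{
    uint32_t src_w = 3840, src_h = 2160; /* source size, whole pixels */
    uint32_t dst_w = 1920, dst_h = 1080; /* scaled-down plane size    */
    uint32_t w_ratio = fp_div(src_w, dst_w);
    uint32_t h_ratio = fp_div(src_h, dst_h);

    if (w_ratio < FP_ONE)
        w_ratio = FP_ONE; /* upscaling does not reduce the amount */
    if (h_ratio < FP_ONE)
        h_ratio = FP_ONE;

    /* 2.0 * 2.0 = 4.0 */
    printf("downscale amount = %.3f\n", fp_mul(w_ratio, h_ratio) / 65536.0);
    return 0;
}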
+ */ + src_w = drm_rect_width(&plane_state->base.src) >> 16; + src_h = drm_rect_height(&plane_state->base.src) >> 16; + dst_w = drm_rect_width(&plane_state->base.dst); + dst_h = drm_rect_height(&plane_state->base.dst); fp_w_ratio = div_fixed16(src_w, dst_w); fp_h_ratio = div_fixed16(src_h, dst_h); @@ -4109,117 +4095,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state, return mul_fixed16(downscale_w, downscale_h); } -static uint_fixed_16_16_t -skl_pipe_downscale_amount(const struct intel_crtc_state *crtc_state) -{ - uint_fixed_16_16_t pipe_downscale = u32_to_fixed16(1); - - if (!crtc_state->base.enable) - return pipe_downscale; - - if (crtc_state->pch_pfit.enabled) { - u32 src_w, src_h, dst_w, dst_h; - u32 pfit_size = crtc_state->pch_pfit.size; - uint_fixed_16_16_t fp_w_ratio, fp_h_ratio; - uint_fixed_16_16_t downscale_h, downscale_w; - - src_w = crtc_state->pipe_src_w; - src_h = crtc_state->pipe_src_h; - dst_w = pfit_size >> 16; - dst_h = pfit_size & 0xffff; - - if (!dst_w || !dst_h) - return pipe_downscale; - - fp_w_ratio = div_fixed16(src_w, dst_w); - fp_h_ratio = div_fixed16(src_h, dst_h); - downscale_w = max_fixed16(fp_w_ratio, u32_to_fixed16(1)); - downscale_h = max_fixed16(fp_h_ratio, u32_to_fixed16(1)); - - pipe_downscale = mul_fixed16(downscale_w, downscale_h); - } - - return pipe_downscale; -} - -int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, - struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev); - struct drm_atomic_state *state = crtc_state->base.state; - struct drm_plane *plane; - const struct drm_plane_state *drm_plane_state; - int crtc_clock, dotclk; - u32 pipe_max_pixel_rate; - uint_fixed_16_16_t pipe_downscale; - uint_fixed_16_16_t max_downscale = u32_to_fixed16(1); - - if (!crtc_state->base.enable) - return 0; - - drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) { - uint_fixed_16_16_t plane_downscale; - uint_fixed_16_16_t fp_9_div_8 = div_fixed16(9, 8); - int bpp; - const struct intel_plane_state *plane_state = - to_intel_plane_state(drm_plane_state); - - if (!intel_wm_plane_visible(crtc_state, plane_state)) - continue; - - if (WARN_ON(!plane_state->base.fb)) - return -EINVAL; - - plane_downscale = skl_plane_downscale_amount(crtc_state, plane_state); - bpp = plane_state->base.fb->format->cpp[0] * 8; - if (bpp == 64) - plane_downscale = mul_fixed16(plane_downscale, - fp_9_div_8); - - max_downscale = max_fixed16(plane_downscale, max_downscale); - } - pipe_downscale = skl_pipe_downscale_amount(crtc_state); - - pipe_downscale = mul_fixed16(pipe_downscale, max_downscale); - - crtc_clock = crtc_state->base.adjusted_mode.crtc_clock; - dotclk = to_intel_atomic_state(state)->cdclk.logical.cdclk; - - if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10) - dotclk *= 2; - - pipe_max_pixel_rate = div_round_up_u32_fixed16(dotclk, pipe_downscale); - - if (pipe_max_pixel_rate < crtc_clock) { - DRM_DEBUG_KMS("Max supported pixel clock with scaling exceeded\n"); - return -EINVAL; - } - - return 0; -} - static u64 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, - const int plane) + int color_plane) { - struct intel_plane *intel_plane = to_intel_plane(plane_state->base.plane); + struct intel_plane *plane = to_intel_plane(plane_state->base.plane); + const struct drm_framebuffer *fb = plane_state->base.fb; u32 data_rate; u32 width = 0, height = 0; - struct drm_framebuffer *fb; - u32 format; 
uint_fixed_16_16_t down_scale_amount; u64 rate; if (!plane_state->base.visible) return 0; - fb = plane_state->base.fb; - format = fb->format->format; - - if (intel_plane->id == PLANE_CURSOR) + if (plane->id == PLANE_CURSOR) return 0; - if (plane == 1 && !is_planar_yuv_format(format)) + + if (color_plane == 1 && + !drm_format_info_is_yuv_semiplanar(fb->format)) return 0; /* @@ -4231,7 +4126,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, height = drm_rect_height(&plane_state->base.src) >> 16; /* UV plane does 1/2 pixel sub-sampling */ - if (plane == 1 && is_planar_yuv_format(format)) { + if (color_plane == 1) { width /= 2; height /= 2; } @@ -4242,7 +4137,7 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, rate = mul_round_up_u32_fixed16(data_rate, down_scale_amount); - rate *= fb->format->cpp[plane]; + rate *= fb->format->cpp[color_plane]; return rate; } @@ -4252,18 +4147,16 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, u64 *uv_plane_data_rate) { struct drm_atomic_state *state = crtc_state->base.state; - struct drm_plane *plane; - const struct drm_plane_state *drm_plane_state; + struct intel_plane *plane; + const struct intel_plane_state *plane_state; u64 total_data_rate = 0; if (WARN_ON(!state)) return 0; /* Calculate and cache data rate for each plane */ - drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) { - enum plane_id plane_id = to_intel_plane(plane)->id; - const struct intel_plane_state *plane_state = - to_intel_plane_state(drm_plane_state); + intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { + enum plane_id plane_id = plane->id; u64 rate; /* packed/y */ @@ -4284,21 +4177,19 @@ static u64 icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, u64 *plane_data_rate) { - struct drm_plane *plane; - const struct drm_plane_state *drm_plane_state; + struct intel_plane *plane; + const struct intel_plane_state *plane_state; u64 total_data_rate = 0; if (WARN_ON(!crtc_state->base.state)) return 0; /* Calculate and cache data rate for each plane */ - drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, &crtc_state->base) { - const struct intel_plane_state *plane_state = - to_intel_plane_state(drm_plane_state); - enum plane_id plane_id = to_intel_plane(plane)->id; + intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) { + enum plane_id plane_id = plane->id; u64 rate; - if (!plane_state->linked_plane) { + if (!plane_state->planar_linked_plane) { rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); plane_data_rate[plane_id] = rate; total_data_rate += rate; @@ -4307,17 +4198,17 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state, /* * The slave plane might not iterate in - * drm_atomic_crtc_state_for_each_plane_state(), + * intel_atomic_crtc_state_for_each_plane_state(), * and needs the master plane state which may be * NULL if we try get_new_plane_state(), so we * always calculate from the master. 
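/*
 * Editor's illustrative sketch, not part of the patch: for planar YUV
 * (e.g. NV12) the UV color plane is subsampled 2x2, so its relative data
 * rate above uses half the width and height but two bytes per pixel.
 * Standalone analogue that ignores the downscale factor; the sizes are
 * made-up example values.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t data_rate(uint32_t w, uint32_t h, uint32_t cpp, int color_plane)
{
    if (color_plane == 1) { /* UV plane: 1/2 pixel sub-sampling */
        w /= 2;
        h /= 2;
    }
    return (uint64_t)w * h * cpp;
}

int main(void)
{
    uint32_t w = 1920, h = 1080; /* visible source size in pixels */

    /* NV12: 1 byte/pixel Y plane plus an interleaved 2 byte/pixel UV plane */
    printf("Y  rate: %llu\n", (unsigned long long)data_rate(w, h, 1, 0));
    printf("UV rate: %llu\n", (unsigned long long)data_rate(w, h, 2, 1));
    return 0;
}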
*/ - if (plane_state->slave) + if (plane_state->planar_slave) continue; /* Y plane rate is calculated on the slave */ rate = skl_plane_relative_data_rate(crtc_state, plane_state, 0); - y_plane_id = plane_state->linked_plane->id; + y_plane_id = plane_state->planar_linked_plane->id; plane_data_rate[y_plane_id] = rate; total_data_rate += rate; @@ -4647,7 +4538,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, u32 interm_pbpl; /* only planar format has two planes */ - if (color_plane == 1 && !is_planar_yuv_format(format->format)) { + if (color_plane == 1 && !drm_format_info_is_yuv_semiplanar(format)) { DRM_DEBUG_KMS("Non planar format have single plane\n"); return -EINVAL; } @@ -4659,7 +4550,7 @@ skl_compute_wm_params(const struct intel_crtc_state *crtc_state, wp->x_tiled = modifier == I915_FORMAT_MOD_X_TILED; wp->rc_surface = modifier == I915_FORMAT_MOD_Y_TILED_CCS || modifier == I915_FORMAT_MOD_Yf_TILED_CCS; - wp->is_planar = is_planar_yuv_format(format->format); + wp->is_planar = drm_format_info_is_yuv_semiplanar(format); wp->width = width; if (color_plane == 1 && wp->is_planar) @@ -4731,20 +4622,15 @@ skl_compute_plane_wm_params(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, struct skl_wm_params *wp, int color_plane) { - struct intel_plane *plane = to_intel_plane(plane_state->base.plane); const struct drm_framebuffer *fb = plane_state->base.fb; int width; - if (plane->id == PLANE_CURSOR) { - width = plane_state->base.crtc_w; - } else { - /* - * Src coordinates are already rotated by 270 degrees for - * the 90/270 degree plane rotation cases (to match the - * GTT mapping), hence no need to account for rotation here. - */ - width = drm_rect_width(&plane_state->base.src) >> 16; - } + /* + * Src coordinates are already rotated by 270 degrees for + * the 90/270 degree plane rotation cases (to match the + * GTT mapping), hence no need to account for rotation here. 
+ */ + width = drm_rect_width(&plane_state->base.src) >> 16; return skl_compute_wm_params(crtc_state, width, fb->format, fb->modifier, @@ -5056,12 +4942,12 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, int ret; /* Watermarks calculated in master */ - if (plane_state->slave) + if (plane_state->planar_slave) return 0; - if (plane_state->linked_plane) { + if (plane_state->planar_linked_plane) { const struct drm_framebuffer *fb = plane_state->base.fb; - enum plane_id y_plane_id = plane_state->linked_plane->id; + enum plane_id y_plane_id = plane_state->planar_linked_plane->id; WARN_ON(!intel_wm_plane_visible(crtc_state, plane_state)); WARN_ON(!fb->format->is_yuv || @@ -5090,8 +4976,8 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state) { struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev); struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; - struct drm_plane *plane; - const struct drm_plane_state *drm_plane_state; + struct intel_plane *plane; + const struct intel_plane_state *plane_state; int ret; /* @@ -5100,10 +4986,8 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state) */ memset(pipe_wm->planes, 0, sizeof(pipe_wm->planes)); - drm_atomic_crtc_state_for_each_plane_state(plane, drm_plane_state, - &crtc_state->base) { - const struct intel_plane_state *plane_state = - to_intel_plane_state(drm_plane_state); + intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, + crtc_state) { if (INTEL_GEN(dev_priv) >= 11) ret = icl_build_plane_wm(crtc_state, plane_state); @@ -5263,19 +5147,6 @@ bool skl_ddb_allocation_overlaps(const struct skl_ddb_entry *ddb, return false; } -static u32 -pipes_modified(struct intel_atomic_state *state) -{ - struct intel_crtc *crtc; - struct intel_crtc_state *crtc_state; - u32 i, ret = 0; - - for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) - ret |= drm_crtc_mask(&crtc->base); - - return ret; -} - static int skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) @@ -5451,36 +5322,27 @@ skl_print_wm_changes(struct intel_atomic_state *state) } } -static int -skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed) +static int intel_add_all_pipes(struct intel_atomic_state *state) { - struct drm_device *dev = state->base.dev; - const struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc *crtc; - struct intel_crtc_state *crtc_state; - u32 realloc_pipes = pipes_modified(state); - int ret, i; - /* - * When we distrust bios wm we always need to recompute to set the - * expected DDB allocations for each CRTC. - */ - if (dev_priv->wm.distrust_bios_wm) - (*changed) = true; + for_each_intel_crtc(&dev_priv->drm, crtc) { + struct intel_crtc_state *crtc_state; - /* - * If this transaction isn't actually touching any CRTC's, don't - * bother with watermark calculation. Note that if we pass this - * test, we're guaranteed to hold at least one CRTC state mutex, - * which means we can safely use values like dev_priv->active_crtcs - * since any racing commits that want to update them would need to - * hold _all_ CRTC state mutexes. 
- */ - for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) - (*changed) = true; + crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + } - if (!*changed) - return 0; + return 0; +} + +static int +skl_ddb_add_affected_pipes(struct intel_atomic_state *state) +{ + struct drm_i915_private *dev_priv = to_i915(state->base.dev); + int ret; /* * If this is our first atomic update following hardware readout, @@ -5489,7 +5351,7 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed) * ensure a full DDB recompute. */ if (dev_priv->wm.distrust_bios_wm) { - ret = drm_modeset_lock(&dev->mode_config.connection_mutex, + ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, state->base.acquire_ctx); if (ret) return ret; @@ -5497,13 +5359,13 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed) state->active_pipe_changes = ~0; /* - * We usually only initialize state->active_crtcs if we + * We usually only initialize state->active_pipes if we * we're doing a modeset; make sure this field is always * initialized during the sanitization process that happens * on the first commit too. */ if (!state->modeset) - state->active_crtcs = dev_priv->active_crtcs; + state->active_pipes = dev_priv->active_pipes; } /* @@ -5520,18 +5382,11 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state, bool *changed) * to grab the lock on *all* CRTC's. */ if (state->active_pipe_changes || state->modeset) { - realloc_pipes = ~0; state->wm_results.dirty_pipes = ~0; - } - /* - * We're not recomputing for the pipes not included in the commit, so - * make sure we start with the current state. - */ - for_each_intel_crtc_mask(dev, crtc, realloc_pipes) { - crtc_state = intel_atomic_get_crtc_state(&state->base, crtc); - if (IS_ERR(crtc_state)) - return PTR_ERR(crtc_state); + ret = intel_add_all_pipes(state); + if (ret) + return ret; } return 0; @@ -5604,14 +5459,13 @@ skl_compute_wm(struct intel_atomic_state *state) struct intel_crtc_state *new_crtc_state; struct intel_crtc_state *old_crtc_state; struct skl_ddb_values *results = &state->wm_results; - bool changed = false; int ret, i; /* Clear all dirty flags */ results->dirty_pipes = 0; - ret = skl_ddb_add_affected_pipes(state, &changed); - if (ret || !changed) + ret = skl_ddb_add_affected_pipes(state); + if (ret) return ret; /* @@ -5633,7 +5487,7 @@ skl_compute_wm(struct intel_atomic_state *state) if (!skl_pipe_wm_equals(crtc, &old_crtc_state->wm.skl.optimal, &new_crtc_state->wm.skl.optimal)) - results->dirty_pipes |= drm_crtc_mask(&crtc->base); + results->dirty_pipes |= BIT(crtc->pipe); } ret = skl_compute_ddb(state); @@ -5653,7 +5507,7 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal; enum pipe pipe = crtc->pipe; - if (!(state->wm_results.dirty_pipes & drm_crtc_mask(&crtc->base))) + if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0) return; I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime); @@ -5662,12 +5516,11 @@ static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state, static void skl_initial_wm(struct intel_atomic_state *state, struct intel_crtc_state *crtc_state) { - struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc); - struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc); + struct drm_i915_private 
*dev_priv = to_i915(crtc->base.dev); struct skl_ddb_values *results = &state->wm_results; - if ((results->dirty_pipes & drm_crtc_mask(&intel_crtc->base)) == 0) + if ((results->dirty_pipes & BIT(crtc->pipe)) == 0) return; mutex_lock(&dev_priv->wm.wm_mutex); @@ -5816,10 +5669,10 @@ void skl_wm_get_hw_state(struct drm_i915_private *dev_priv) skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal); if (crtc->active) - hw->dirty_pipes |= drm_crtc_mask(&crtc->base); + hw->dirty_pipes |= BIT(crtc->pipe); } - if (dev_priv->active_crtcs) { + if (dev_priv->active_pipes) { /* Fully recompute DDB on first atomic commit */ dev_priv->wm.distrust_bios_wm = true; } @@ -6397,2488 +6250,6 @@ void intel_init_ipc(struct drm_i915_private *dev_priv) intel_enable_ipc(dev_priv); } -/* - * Lock protecting IPS related data structures - */ -DEFINE_SPINLOCK(mchdev_lock); - -bool ironlake_set_drps(struct drm_i915_private *i915, u8 val) -{ - struct intel_uncore *uncore = &i915->uncore; - u16 rgvswctl; - - lockdep_assert_held(&mchdev_lock); - - rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); - if (rgvswctl & MEMCTL_CMD_STS) { - DRM_DEBUG("gpu busy, RCS change rejected\n"); - return false; /* still busy with another command */ - } - - rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | - (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; - intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); - intel_uncore_posting_read16(uncore, MEMSWCTL); - - rgvswctl |= MEMCTL_CMD_STS; - intel_uncore_write16(uncore, MEMSWCTL, rgvswctl); - - return true; -} - -static void ironlake_enable_drps(struct drm_i915_private *dev_priv) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u32 rgvmodectl; - u8 fmax, fmin, fstart, vstart; - - spin_lock_irq(&mchdev_lock); - - rgvmodectl = intel_uncore_read(uncore, MEMMODECTL); - - /* Enable temp reporting */ - intel_uncore_write16(uncore, PMMISC, I915_READ(PMMISC) | MCPPCE_EN); - intel_uncore_write16(uncore, TSC1, I915_READ(TSC1) | TSE); - - /* 100ms RC evaluation intervals */ - intel_uncore_write(uncore, RCUPEI, 100000); - intel_uncore_write(uncore, RCDNEI, 100000); - - /* Set max/min thresholds to 90ms and 80ms respectively */ - intel_uncore_write(uncore, RCBMAXAVG, 90000); - intel_uncore_write(uncore, RCBMINAVG, 80000); - - intel_uncore_write(uncore, MEMIHYST, 1); - - /* Set up min, max, and cur for interrupt handling */ - fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; - fmin = (rgvmodectl & MEMMODE_FMIN_MASK); - fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> - MEMMODE_FSTART_SHIFT; - - vstart = (intel_uncore_read(uncore, PXVFREQ(fstart)) & - PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT; - - dev_priv->ips.fmax = fmax; /* IPS callback will increase this */ - dev_priv->ips.fstart = fstart; - - dev_priv->ips.max_delay = fstart; - dev_priv->ips.min_delay = fmin; - dev_priv->ips.cur_delay = fstart; - - DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", - fmax, fmin, fstart); - - intel_uncore_write(uncore, - MEMINTREN, - MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); - - /* - * Interrupts will be enabled in ironlake_irq_postinstall - */ - - intel_uncore_write(uncore, VIDSTART, vstart); - intel_uncore_posting_read(uncore, VIDSTART); - - rgvmodectl |= MEMMODE_SWMODE_EN; - intel_uncore_write(uncore, MEMMODECTL, rgvmodectl); - - if (wait_for_atomic((intel_uncore_read(uncore, MEMSWCTL) & - MEMCTL_CMD_STS) == 0, 10)) - DRM_ERROR("stuck trying to change perf mode\n"); - mdelay(1); - - ironlake_set_drps(dev_priv, fstart); - - dev_priv->ips.last_count1 = - intel_uncore_read(uncore, DMIEC) + - 
intel_uncore_read(uncore, DDREC) + - intel_uncore_read(uncore, CSIEC); - dev_priv->ips.last_time1 = jiffies_to_msecs(jiffies); - dev_priv->ips.last_count2 = intel_uncore_read(uncore, GFXEC); - dev_priv->ips.last_time2 = ktime_get_raw_ns(); - - spin_unlock_irq(&mchdev_lock); -} - -static void ironlake_disable_drps(struct drm_i915_private *i915) -{ - struct intel_uncore *uncore = &i915->uncore; - u16 rgvswctl; - - spin_lock_irq(&mchdev_lock); - - rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); - - /* Ack interrupts, disable EFC interrupt */ - intel_uncore_write(uncore, - MEMINTREN, - intel_uncore_read(uncore, MEMINTREN) & - ~MEMINT_EVAL_CHG_EN); - intel_uncore_write(uncore, MEMINTRSTS, MEMINT_EVAL_CHG); - intel_uncore_write(uncore, - DEIER, - intel_uncore_read(uncore, DEIER) & ~DE_PCU_EVENT); - intel_uncore_write(uncore, DEIIR, DE_PCU_EVENT); - intel_uncore_write(uncore, - DEIMR, - intel_uncore_read(uncore, DEIMR) | DE_PCU_EVENT); - - /* Go back to the starting frequency */ - ironlake_set_drps(i915, i915->ips.fstart); - mdelay(1); - rgvswctl |= MEMCTL_CMD_STS; - intel_uncore_write(uncore, MEMSWCTL, rgvswctl); - mdelay(1); - - spin_unlock_irq(&mchdev_lock); -} - -/* There's a funny hw issue where the hw returns all 0 when reading from - * GEN6_RP_INTERRUPT_LIMITS. Hence we always need to compute the desired value - * ourselves, instead of doing a rmw cycle (which might result in us clearing - * all limits and the gpu stuck at whatever frequency it is at atm). - */ -static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 limits; - - /* Only set the down limit when we've reached the lowest level to avoid - * getting more interrupts, otherwise leave this clear. This prevents a - * race in the hw when coming out of rc6: There's a tiny window where - * the hw runs at the minimal clock before selecting the desired - * frequency, if the down threshold expires in that window we will not - * receive a down interrupt. */ - if (INTEL_GEN(dev_priv) >= 9) { - limits = (rps->max_freq_softlimit) << 23; - if (val <= rps->min_freq_softlimit) - limits |= (rps->min_freq_softlimit) << 14; - } else { - limits = rps->max_freq_softlimit << 24; - if (val <= rps->min_freq_softlimit) - limits |= rps->min_freq_softlimit << 16; - } - - return limits; -} - -static void rps_set_power(struct drm_i915_private *dev_priv, int new_power) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 threshold_up = 0, threshold_down = 0; /* in % */ - u32 ei_up = 0, ei_down = 0; - - lockdep_assert_held(&rps->power.mutex); - - if (new_power == rps->power.mode) - return; - - /* Note the units here are not exactly 1us, but 1280ns. */ - switch (new_power) { - case LOW_POWER: - /* Upclock if more than 95% busy over 16ms */ - ei_up = 16000; - threshold_up = 95; - - /* Downclock if less than 85% busy over 32ms */ - ei_down = 32000; - threshold_down = 85; - break; - - case BETWEEN: - /* Upclock if more than 90% busy over 13ms */ - ei_up = 13000; - threshold_up = 90; - - /* Downclock if less than 75% busy over 32ms */ - ei_down = 32000; - threshold_down = 75; - break; - - case HIGH_POWER: - /* Upclock if more than 85% busy over 10ms */ - ei_up = 10000; - threshold_up = 85; - - /* Downclock if less than 60% busy over 32ms */ - ei_down = 32000; - threshold_down = 60; - break; - } - - /* When byt can survive without system hang with dynamic - * sw freq adjustments, this restriction can be lifted. 
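
For quick reference, the evaluation intervals and busy-percentage thresholds programmed by the removed rps_set_power() above can be restated as a small table. This is an illustrative, standalone sketch: the struct and array names are not part of the driver, only the constants are taken from the code shown (and, as the code notes, the hardware units are ~1.28 us rather than exact microseconds).

	/* Illustrative restatement of the removed rps_set_power() policy:
	 * upclock when busier than up_pct over ei_up_us, downclock when
	 * idler than down_pct over ei_down_us. */
	struct rps_policy {
		unsigned int ei_up_us, up_pct;
		unsigned int ei_down_us, down_pct;
	};

	static const struct rps_policy rps_policy_table[] = {
		/* LOW_POWER  */ { 16000, 95, 32000, 85 },
		/* BETWEEN    */ { 13000, 90, 32000, 75 },
		/* HIGH_POWER */ { 10000, 85, 32000, 60 },
	};
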
- */ - if (IS_VALLEYVIEW(dev_priv)) - goto skip_hw_write; - - I915_WRITE(GEN6_RP_UP_EI, - GT_INTERVAL_FROM_US(dev_priv, ei_up)); - I915_WRITE(GEN6_RP_UP_THRESHOLD, - GT_INTERVAL_FROM_US(dev_priv, - ei_up * threshold_up / 100)); - - I915_WRITE(GEN6_RP_DOWN_EI, - GT_INTERVAL_FROM_US(dev_priv, ei_down)); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, - GT_INTERVAL_FROM_US(dev_priv, - ei_down * threshold_down / 100)); - - I915_WRITE(GEN6_RP_CONTROL, - (INTEL_GEN(dev_priv) > 9 ? 0 : GEN6_RP_MEDIA_TURBO) | - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_AVG); - -skip_hw_write: - rps->power.mode = new_power; - rps->power.up_threshold = threshold_up; - rps->power.down_threshold = threshold_down; -} - -static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - int new_power; - - new_power = rps->power.mode; - switch (rps->power.mode) { - case LOW_POWER: - if (val > rps->efficient_freq + 1 && - val > rps->cur_freq) - new_power = BETWEEN; - break; - - case BETWEEN: - if (val <= rps->efficient_freq && - val < rps->cur_freq) - new_power = LOW_POWER; - else if (val >= rps->rp0_freq && - val > rps->cur_freq) - new_power = HIGH_POWER; - break; - - case HIGH_POWER: - if (val < (rps->rp1_freq + rps->rp0_freq) >> 1 && - val < rps->cur_freq) - new_power = BETWEEN; - break; - } - /* Max/min bins are special */ - if (val <= rps->min_freq_softlimit) - new_power = LOW_POWER; - if (val >= rps->max_freq_softlimit) - new_power = HIGH_POWER; - - mutex_lock(&rps->power.mutex); - if (rps->power.interactive) - new_power = HIGH_POWER; - rps_set_power(dev_priv, new_power); - mutex_unlock(&rps->power.mutex); -} - -void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive) -{ - struct intel_rps *rps = &i915->gt_pm.rps; - - if (INTEL_GEN(i915) < 6) - return; - - mutex_lock(&rps->power.mutex); - if (interactive) { - if (!rps->power.interactive++ && READ_ONCE(i915->gt.awake)) - rps_set_power(i915, HIGH_POWER); - } else { - GEM_BUG_ON(!rps->power.interactive); - rps->power.interactive--; - } - mutex_unlock(&rps->power.mutex); -} - -static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 mask = 0; - - /* We use UP_EI_EXPIRED interupts for both up/down in manual mode */ - if (val > rps->min_freq_softlimit) - mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT; - if (val < rps->max_freq_softlimit) - mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD; - - mask &= dev_priv->pm_rps_events; - - return gen6_sanitize_rps_pm_mask(dev_priv, ~mask); -} - -/* gen6_set_rps is called to update the frequency request, but should also be - * called when the range (min_delay and max_delay) is modified so that we can - * update the GEN6_RP_INTERRUPT_LIMITS register accordingly. */ -static int gen6_set_rps(struct drm_i915_private *dev_priv, u8 val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* min/max delay may still have been modified so be sure to - * write the limits value. 
- */ - if (val != rps->cur_freq) { - gen6_set_rps_thresholds(dev_priv, val); - - if (INTEL_GEN(dev_priv) >= 9) - I915_WRITE(GEN6_RPNSWREQ, - GEN9_FREQUENCY(val)); - else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - I915_WRITE(GEN6_RPNSWREQ, - HSW_FREQUENCY(val)); - else - I915_WRITE(GEN6_RPNSWREQ, - GEN6_FREQUENCY(val) | - GEN6_OFFSET(0) | - GEN6_AGGRESSIVE_TURBO); - } - - /* Make sure we continue to get interrupts - * until we hit the minimum or maximum frequencies. - */ - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val)); - I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); - - rps->cur_freq = val; - trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); - - return 0; -} - -static int valleyview_set_rps(struct drm_i915_private *dev_priv, u8 val) -{ - int err; - - if (WARN_ONCE(IS_CHERRYVIEW(dev_priv) && (val & 1), - "Odd GPU freq value\n")) - val &= ~1; - - I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); - - if (val != dev_priv->gt_pm.rps.cur_freq) { - vlv_punit_get(dev_priv); - err = vlv_punit_write(dev_priv, PUNIT_REG_GPU_FREQ_REQ, val); - vlv_punit_put(dev_priv); - if (err) - return err; - - gen6_set_rps_thresholds(dev_priv, val); - } - - dev_priv->gt_pm.rps.cur_freq = val; - trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val)); - - return 0; -} - -/* vlv_set_rps_idle: Set the frequency to idle, if Gfx clocks are down - * - * * If Gfx is Idle, then - * 1. Forcewake Media well. - * 2. Request idle freq. - * 3. Release Forcewake of Media well. -*/ -static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 val = rps->idle_freq; - int err; - - if (rps->cur_freq <= val) - return; - - /* The punit delays the write of the frequency and voltage until it - * determines the GPU is awake. During normal usage we don't want to - * waste power changing the frequency if the GPU is sleeping (rc6). - * However, the GPU and driver is now idle and we do not want to delay - * switching to minimum voltage (reducing power whilst idle) as we do - * not expect to be woken in the near future and so must flush the - * change by waking the device. - * - * We choose to take the media powerwell (either would do to trick the - * punit into committing the voltage change) as that takes a lot less - * power than the render powerwell. - */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_MEDIA); - err = valleyview_set_rps(dev_priv, val); - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_MEDIA); - - if (err) - DRM_ERROR("Failed to set RPS for idle\n"); -} - -void gen6_rps_busy(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - mutex_lock(&rps->lock); - if (rps->enabled) { - u8 freq; - - if (dev_priv->pm_rps_events & GEN6_PM_RP_UP_EI_EXPIRED) - gen6_rps_reset_ei(dev_priv); - I915_WRITE(GEN6_PMINTRMSK, - gen6_rps_pm_mask(dev_priv, rps->cur_freq)); - - gen6_enable_rps_interrupts(dev_priv); - - /* Use the user's desired frequency as a guide, but for better - * performance, jump directly to RPe as our starting frequency. 
- */ - freq = max(rps->cur_freq, - rps->efficient_freq); - - if (intel_set_rps(dev_priv, - clamp(freq, - rps->min_freq_softlimit, - rps->max_freq_softlimit))) - DRM_DEBUG_DRIVER("Failed to set idle frequency\n"); - } - mutex_unlock(&rps->lock); -} - -void gen6_rps_idle(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* Flush our bottom-half so that it does not race with us - * setting the idle frequency and so that it is bounded by - * our rpm wakeref. And then disable the interrupts to stop any - * futher RPS reclocking whilst we are asleep. - */ - gen6_disable_rps_interrupts(dev_priv); - - mutex_lock(&rps->lock); - if (rps->enabled) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - vlv_set_rps_idle(dev_priv); - else - gen6_set_rps(dev_priv, rps->idle_freq); - rps->last_adj = 0; - I915_WRITE(GEN6_PMINTRMSK, - gen6_sanitize_rps_pm_mask(dev_priv, ~0)); - } - mutex_unlock(&rps->lock); -} - -void gen6_rps_boost(struct i915_request *rq) -{ - struct intel_rps *rps = &rq->i915->gt_pm.rps; - unsigned long flags; - bool boost; - - /* This is intentionally racy! We peek at the state here, then - * validate inside the RPS worker. - */ - if (!rps->enabled) - return; - - if (i915_request_signaled(rq)) - return; - - /* Serializes with i915_request_retire() */ - boost = false; - spin_lock_irqsave(&rq->lock, flags); - if (!i915_request_has_waitboost(rq) && - !dma_fence_is_signaled_locked(&rq->fence)) { - boost = !atomic_fetch_inc(&rps->num_waiters); - rq->flags |= I915_REQUEST_WAITBOOST; - } - spin_unlock_irqrestore(&rq->lock, flags); - if (!boost) - return; - - if (READ_ONCE(rps->cur_freq) < rps->boost_freq) - schedule_work(&rps->work); - - atomic_inc(&rps->boosts); -} - -int intel_set_rps(struct drm_i915_private *dev_priv, u8 val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - int err; - - lockdep_assert_held(&rps->lock); - GEM_BUG_ON(val > rps->max_freq); - GEM_BUG_ON(val < rps->min_freq); - - if (!rps->enabled) { - rps->cur_freq = val; - return 0; - } - - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - err = valleyview_set_rps(dev_priv, val); - else - err = gen6_set_rps(dev_priv, val); - - return err; -} - -static void gen9_disable_rc6(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RC_CONTROL, 0); - I915_WRITE(GEN9_PG_ENABLE, 0); -} - -static void gen9_disable_rps(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RP_CONTROL, 0); -} - -static void gen6_disable_rc6(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RC_CONTROL, 0); -} - -static void gen6_disable_rps(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RPNSWREQ, 1 << 31); - I915_WRITE(GEN6_RP_CONTROL, 0); -} - -static void cherryview_disable_rc6(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RC_CONTROL, 0); -} - -static void cherryview_disable_rps(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RP_CONTROL, 0); -} - -static void valleyview_disable_rc6(struct drm_i915_private *dev_priv) -{ - /* We're doing forcewake before Disabling RC6, - * This what the BIOS expects when going into suspend */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - I915_WRITE(GEN6_RC_CONTROL, 0); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void valleyview_disable_rps(struct drm_i915_private *dev_priv) -{ - I915_WRITE(GEN6_RP_CONTROL, 0); -} - -static bool bxt_check_bios_rc6_setup(struct drm_i915_private *dev_priv) -{ - bool enable_rc6 = true; - unsigned long rc6_ctx_base; - u32 rc_ctl; - int 
rc_sw_target; - - rc_ctl = I915_READ(GEN6_RC_CONTROL); - rc_sw_target = (I915_READ(GEN6_RC_STATE) & RC_SW_TARGET_STATE_MASK) >> - RC_SW_TARGET_STATE_SHIFT; - DRM_DEBUG_DRIVER("BIOS enabled RC states: " - "HW_CTRL %s HW_RC6 %s SW_TARGET_STATE %x\n", - onoff(rc_ctl & GEN6_RC_CTL_HW_ENABLE), - onoff(rc_ctl & GEN6_RC_CTL_RC6_ENABLE), - rc_sw_target); - - if (!(I915_READ(RC6_LOCATION) & RC6_CTX_IN_DRAM)) { - DRM_DEBUG_DRIVER("RC6 Base location not set properly.\n"); - enable_rc6 = false; - } - - /* - * The exact context size is not known for BXT, so assume a page size - * for this check. - */ - rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK; - if (!((rc6_ctx_base >= dev_priv->dsm_reserved.start) && - (rc6_ctx_base + PAGE_SIZE < dev_priv->dsm_reserved.end))) { - DRM_DEBUG_DRIVER("RC6 Base address not as expected.\n"); - enable_rc6 = false; - } - - if (!(((I915_READ(PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1) && - ((I915_READ(PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1) && - ((I915_READ(PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1) && - ((I915_READ(PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1))) { - DRM_DEBUG_DRIVER("Engine Idle wait time not set properly.\n"); - enable_rc6 = false; - } - - if (!I915_READ(GEN8_PUSHBUS_CONTROL) || - !I915_READ(GEN8_PUSHBUS_ENABLE) || - !I915_READ(GEN8_PUSHBUS_SHIFT)) { - DRM_DEBUG_DRIVER("Pushbus not setup properly.\n"); - enable_rc6 = false; - } - - if (!I915_READ(GEN6_GFXPAUSE)) { - DRM_DEBUG_DRIVER("GFX pause not setup properly.\n"); - enable_rc6 = false; - } - - if (!I915_READ(GEN8_MISC_CTRL0)) { - DRM_DEBUG_DRIVER("GPM control not setup properly.\n"); - enable_rc6 = false; - } - - return enable_rc6; -} - -static bool sanitize_rc6(struct drm_i915_private *i915) -{ - struct intel_device_info *info = mkwrite_device_info(i915); - - /* Powersaving is controlled by the host when inside a VM */ - if (intel_vgpu_active(i915)) { - info->has_rc6 = 0; - info->has_rps = false; - } - - if (info->has_rc6 && - IS_GEN9_LP(i915) && !bxt_check_bios_rc6_setup(i915)) { - DRM_INFO("RC6 disabled by BIOS\n"); - info->has_rc6 = 0; - } - - /* - * We assume that we do not have any deep rc6 levels if we don't have - * have the previous rc6 level supported, i.e. we use HAS_RC6() - * as the initial coarse check for rc6 in general, moving on to - * progressively finer/deeper levels. 
- */ - if (!info->has_rc6 && info->has_rc6p) - info->has_rc6p = 0; - - return info->has_rc6; -} - -static void gen6_init_rps_frequencies(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* All of these values are in units of 50MHz */ - - /* static values from HW: RP0 > RP1 > RPn (min_freq) */ - if (IS_GEN9_LP(dev_priv)) { - u32 rp_state_cap = I915_READ(BXT_RP_STATE_CAP); - rps->rp0_freq = (rp_state_cap >> 16) & 0xff; - rps->rp1_freq = (rp_state_cap >> 8) & 0xff; - rps->min_freq = (rp_state_cap >> 0) & 0xff; - } else { - u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); - rps->rp0_freq = (rp_state_cap >> 0) & 0xff; - rps->rp1_freq = (rp_state_cap >> 8) & 0xff; - rps->min_freq = (rp_state_cap >> 16) & 0xff; - } - /* hw_max = RP0 until we check for overclocking */ - rps->max_freq = rps->rp0_freq; - - rps->efficient_freq = rps->rp1_freq; - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv) || - IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) { - u32 ddcc_status = 0; - - if (sandybridge_pcode_read(dev_priv, - HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, - &ddcc_status, NULL) == 0) - rps->efficient_freq = - clamp_t(u8, - ((ddcc_status >> 8) & 0xff), - rps->min_freq, - rps->max_freq); - } - - if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) { - /* Store the frequency values in 16.66 MHZ units, which is - * the natural hardware unit for SKL - */ - rps->rp0_freq *= GEN9_FREQ_SCALER; - rps->rp1_freq *= GEN9_FREQ_SCALER; - rps->min_freq *= GEN9_FREQ_SCALER; - rps->max_freq *= GEN9_FREQ_SCALER; - rps->efficient_freq *= GEN9_FREQ_SCALER; - } -} - -static void reset_rps(struct drm_i915_private *dev_priv, - int (*set)(struct drm_i915_private *, u8)) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u8 freq = rps->cur_freq; - - /* force a reset */ - rps->power.mode = -1; - rps->cur_freq = -1; - - if (set(dev_priv, freq)) - DRM_ERROR("Failed to reset RPS to initial values\n"); -} - -/* See the Gen9_GT_PM_Programming_Guide doc for the below */ -static void gen9_enable_rps(struct drm_i915_private *dev_priv) -{ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* Program defaults and thresholds for RPS */ - if (IS_GEN(dev_priv, 9)) - I915_WRITE(GEN6_RC_VIDEO_FREQ, - GEN9_FREQUENCY(dev_priv->gt_pm.rps.rp1_freq)); - - /* 1 second timeout*/ - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, - GT_INTERVAL_FROM_US(dev_priv, 1000000)); - - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa); - - /* Leaning on the below call to gen6_set_rps to program/setup the - * Up/Down EI & threshold registers, as well as the RP_CONTROL, - * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ - reset_rps(dev_priv, gen6_set_rps); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void gen11_enable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - /* 1a: Software RC state - RC0 */ - I915_WRITE(GEN6_RC_STATE, 0); - - /* - * 1b: Get forcewake during program sequence. Although the driver - * hasn't enabled a state yet where we need forcewake, BIOS may have. - */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* 2a: Disable RC states. 
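
As a standalone illustration of the fuse layout read by the removed gen6_init_rps_frequencies() above: the RP0/RP1/RPn fields sit in opposite byte order on GEN9_LP parts compared to the others, and the raw values are in 50 MHz units (on SKL-class parts the driver then rescales them by GEN9_FREQ_SCALER into 16.66 MHz units, as the code notes). The helper name and struct below are hypothetical; only the shifts mirror the code shown.

	#include <stdint.h>

	struct rp_caps { uint8_t rp0, rp1, rpn; };	/* values in 50 MHz units */

	static struct rp_caps decode_rp_state_cap(uint32_t cap, int is_gen9_lp)
	{
		struct rp_caps c;

		if (is_gen9_lp) {		/* BXT_RP_STATE_CAP layout */
			c.rp0 = (cap >> 16) & 0xff;
			c.rp1 = (cap >> 8) & 0xff;
			c.rpn = (cap >> 0) & 0xff;
		} else {			/* GEN6_RP_STATE_CAP layout */
			c.rp0 = (cap >> 0) & 0xff;
			c.rp1 = (cap >> 8) & 0xff;
			c.rpn = (cap >> 16) & 0xff;
		}
		return c;
	}
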
*/ - I915_WRITE(GEN6_RC_CONTROL, 0); - - /* 2b: Program RC6 thresholds.*/ - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); - I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150); - - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - - if (HAS_GT_UC(dev_priv)) - I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); - - I915_WRITE(GEN6_RC_SLEEP, 0); - - I915_WRITE(GEN6_RC6_THRESHOLD, 50000); /* 50/125ms per EI */ - - /* - * 2c: Program Coarse Power Gating Policies. - * - * Bspec's guidance is to use 25us (really 25 * 1280ns) here. What we - * use instead is a more conservative estimate for the maximum time - * it takes us to service a CS interrupt and submit a new ELSP - that - * is the time which the GPU is idle waiting for the CPU to select the - * next request to execute. If the idle hysteresis is less than that - * interrupt service latency, the hardware will automatically gate - * the power well and we will then incur the wake up cost on top of - * the service latency. A similar guide from plane_state is that we - * do not want the enable hysteresis to less than the wakeup latency. - * - * igt/gem_exec_nop/sequential provides a rough estimate for the - * service latency, and puts it around 10us for Broadwell (and other - * big core) and around 40us for Broxton (and other low power cores). - * [Note that for legacy ringbuffer submission, this is less than 1us!] - * However, the wakeup latency on Broxton is closer to 100us. To be - * conservative, we have to factor in a context switch on top (due - * to ksoftirqd). - */ - I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250); - I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250); - - /* 3a: Enable RC6 */ - I915_WRITE(GEN6_RC_CONTROL, - GEN6_RC_CTL_HW_ENABLE | - GEN6_RC_CTL_RC6_ENABLE | - GEN6_RC_CTL_EI_MODE(1)); - - /* 3b: Enable Coarse Power Gating only when RC6 is enabled. */ - I915_WRITE(GEN9_PG_ENABLE, - GEN9_RENDER_PG_ENABLE | - GEN9_MEDIA_PG_ENABLE | - GEN11_MEDIA_SAMPLER_PG_ENABLE); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void gen9_enable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 rc6_mode; - - /* 1a: Software RC state - RC0 */ - I915_WRITE(GEN6_RC_STATE, 0); - - /* 1b: Get forcewake during program sequence. Although the driver - * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* 2a: Disable RC states. */ - I915_WRITE(GEN6_RC_CONTROL, 0); - - /* 2b: Program RC6 thresholds.*/ - if (INTEL_GEN(dev_priv) >= 10) { - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16 | 85); - I915_WRITE(GEN10_MEDIA_WAKE_RATE_LIMIT, 150); - } else if (IS_SKYLAKE(dev_priv)) { - /* - * WaRsDoubleRc6WrlWithCoarsePowerGating:skl Doubling WRL only - * when CPG is enabled - */ - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16); - } else { - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16); - } - - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - - if (HAS_GT_UC(dev_priv)) - I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); - - I915_WRITE(GEN6_RC_SLEEP, 0); - - /* - * 2c: Program Coarse Power Gating Policies. - * - * Bspec's guidance is to use 25us (really 25 * 1280ns) here. 
What we - * use instead is a more conservative estimate for the maximum time - * it takes us to service a CS interrupt and submit a new ELSP - that - * is the time which the GPU is idle waiting for the CPU to select the - * next request to execute. If the idle hysteresis is less than that - * interrupt service latency, the hardware will automatically gate - * the power well and we will then incur the wake up cost on top of - * the service latency. A similar guide from plane_state is that we - * do not want the enable hysteresis to less than the wakeup latency. - * - * igt/gem_exec_nop/sequential provides a rough estimate for the - * service latency, and puts it around 10us for Broadwell (and other - * big core) and around 40us for Broxton (and other low power cores). - * [Note that for legacy ringbuffer submission, this is less than 1us!] - * However, the wakeup latency on Broxton is closer to 100us. To be - * conservative, we have to factor in a context switch on top (due - * to ksoftirqd). - */ - I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 250); - I915_WRITE(GEN9_RENDER_PG_IDLE_HYSTERESIS, 250); - - /* 3a: Enable RC6 */ - I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ - - /* WaRsUseTimeoutMode:cnl (pre-prod) */ - if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_C0)) - rc6_mode = GEN7_RC_CTL_TO_MODE; - else - rc6_mode = GEN6_RC_CTL_EI_MODE(1); - - I915_WRITE(GEN6_RC_CONTROL, - GEN6_RC_CTL_HW_ENABLE | - GEN6_RC_CTL_RC6_ENABLE | - rc6_mode); - - /* - * 3b: Enable Coarse Power Gating only when RC6 is enabled. - * WaRsDisableCoarsePowerGating:skl,cnl - Render/Media PG need to be disabled with RC6. - */ - if (NEEDS_WaRsDisableCoarsePowerGating(dev_priv)) - I915_WRITE(GEN9_PG_ENABLE, 0); - else - I915_WRITE(GEN9_PG_ENABLE, - GEN9_RENDER_PG_ENABLE | GEN9_MEDIA_PG_ENABLE); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void gen8_enable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - - /* 1a: Software RC state - RC0 */ - I915_WRITE(GEN6_RC_STATE, 0); - - /* 1b: Get forcewake during program sequence. Although the driver - * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* 2a: Disable RC states. 
*/ - I915_WRITE(GEN6_RC_CONTROL, 0); - - /* 2b: Program RC6 thresholds.*/ - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - I915_WRITE(GEN6_RC_SLEEP, 0); - I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us/1.28 for TO */ - - /* 3: Enable RC6 */ - - I915_WRITE(GEN6_RC_CONTROL, - GEN6_RC_CTL_HW_ENABLE | - GEN7_RC_CTL_TO_MODE | - GEN6_RC_CTL_RC6_ENABLE); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void gen8_enable_rps(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* 1 Program defaults and thresholds for RPS*/ - I915_WRITE(GEN6_RPNSWREQ, - HSW_FREQUENCY(rps->rp1_freq)); - I915_WRITE(GEN6_RC_VIDEO_FREQ, - HSW_FREQUENCY(rps->rp1_freq)); - /* NB: Docs say 1s, and 1000000 - which aren't equivalent */ - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 100000000 / 128); /* 1 second timeout */ - - /* Docs recommend 900MHz, and 300 MHz respectively */ - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, - rps->max_freq_softlimit << 24 | - rps->min_freq_softlimit << 16); - - I915_WRITE(GEN6_RP_UP_THRESHOLD, 7600000 / 128); /* 76ms busyness per EI, 90% */ - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 31300000 / 128); /* 313ms busyness per EI, 70%*/ - I915_WRITE(GEN6_RP_UP_EI, 66000); /* 84.48ms, XXX: random? */ - I915_WRITE(GEN6_RP_DOWN_EI, 350000); /* 448ms, XXX: random? */ - - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); - - /* 2: Enable RPS */ - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_AVG); - - reset_rps(dev_priv, gen6_set_rps); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void gen6_enable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 rc6vids, rc6_mask; - u32 gtfifodbg; - int ret; - - I915_WRITE(GEN6_RC_STATE, 0); - - /* Clear the DBG now so we don't confuse earlier errors */ - gtfifodbg = I915_READ(GTFIFODBG); - if (gtfifodbg) { - DRM_ERROR("GT fifo had a previous error %x\n", gtfifodbg); - I915_WRITE(GTFIFODBG, gtfifodbg); - } - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* disable the counters and set deterministic thresholds */ - I915_WRITE(GEN6_RC_CONTROL, 0); - - I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); - I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - - I915_WRITE(GEN6_RC_SLEEP, 0); - I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); - if (IS_IVYBRIDGE(dev_priv)) - I915_WRITE(GEN6_RC6_THRESHOLD, 125000); - else - I915_WRITE(GEN6_RC6_THRESHOLD, 50000); - I915_WRITE(GEN6_RC6p_THRESHOLD, 150000); - I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ - - /* We don't use those on Haswell */ - rc6_mask = GEN6_RC_CTL_RC6_ENABLE; - if (HAS_RC6p(dev_priv)) - rc6_mask |= GEN6_RC_CTL_RC6p_ENABLE; - if (HAS_RC6pp(dev_priv)) - rc6_mask |= GEN6_RC_CTL_RC6pp_ENABLE; - I915_WRITE(GEN6_RC_CONTROL, - rc6_mask | - GEN6_RC_CTL_EI_MODE(1) | - GEN6_RC_CTL_HW_ENABLE); - - rc6vids = 0; - ret = 
sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, - &rc6vids, NULL); - if (IS_GEN(dev_priv, 6) && ret) { - DRM_DEBUG_DRIVER("Couldn't check for BIOS workaround\n"); - } else if (IS_GEN(dev_priv, 6) && (GEN6_DECODE_RC6_VID(rc6vids & 0xff) < 450)) { - DRM_DEBUG_DRIVER("You should update your BIOS. Correcting minimum rc6 voltage (%dmV->%dmV)\n", - GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); - rc6vids &= 0xffff00; - rc6vids |= GEN6_ENCODE_RC6_VID(450); - ret = sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); - if (ret) - DRM_ERROR("Couldn't fix incorrect rc6 voltage\n"); - } - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void gen6_enable_rps(struct drm_i915_private *dev_priv) -{ - /* Here begins a magic sequence of register writes to enable - * auto-downclocking. - * - * Perhaps there might be some value in exposing these to - * userspace... - */ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* Power down if completely idle for over 50ms */ - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 50000); - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); - - reset_rps(dev_priv, gen6_set_rps); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void gen6_update_ring_freq(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - const int min_freq = 15; - const int scaling_factor = 180; - unsigned int gpu_freq; - unsigned int max_ia_freq, min_ring_freq; - unsigned int max_gpu_freq, min_gpu_freq; - struct cpufreq_policy *policy; - - lockdep_assert_held(&rps->lock); - - if (rps->max_freq <= rps->min_freq) - return; - - policy = cpufreq_cpu_get(0); - if (policy) { - max_ia_freq = policy->cpuinfo.max_freq; - cpufreq_cpu_put(policy); - } else { - /* - * Default to measured freq if none found, PCU will ensure we - * don't go over - */ - max_ia_freq = tsc_khz; - } - - /* Convert from kHz to MHz */ - max_ia_freq /= 1000; - - min_ring_freq = I915_READ(DCLK) & 0xf; - /* convert DDR frequency from units of 266.6MHz to bandwidth */ - min_ring_freq = mult_frac(min_ring_freq, 8, 3); - - min_gpu_freq = rps->min_freq; - max_gpu_freq = rps->max_freq; - if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) { - /* Convert GT frequency to 50 HZ units */ - min_gpu_freq /= GEN9_FREQ_SCALER; - max_gpu_freq /= GEN9_FREQ_SCALER; - } - - /* - * For each potential GPU frequency, load a ring frequency we'd like - * to use for memory access. We do this by specifying the IA frequency - * the PCU should use as a reference to determine the ring frequency. - */ - for (gpu_freq = max_gpu_freq; gpu_freq >= min_gpu_freq; gpu_freq--) { - const int diff = max_gpu_freq - gpu_freq; - unsigned int ia_freq = 0, ring_freq = 0; - - if (IS_GEN9_BC(dev_priv) || INTEL_GEN(dev_priv) >= 10) { - /* - * ring_freq = 2 * GT. ring_freq is in 100MHz units - * No floor required for ring frequency on SKL. - */ - ring_freq = gpu_freq; - } else if (INTEL_GEN(dev_priv) >= 8) { - /* max(2 * GT, DDR). NB: GT is 50MHz units */ - ring_freq = max(min_ring_freq, gpu_freq); - } else if (IS_HASWELL(dev_priv)) { - ring_freq = mult_frac(gpu_freq, 5, 4); - ring_freq = max(min_ring_freq, ring_freq); - /* leave ia_freq as the default, chosen by cpufreq */ - } else { - /* On older processors, there is no separate ring - * clock domain, so in order to boost the bandwidth - * of the ring, we need to upclock the CPU (ia_freq). - * - * For GPU frequencies less than 750MHz, - * just use the lowest ring freq. 
- */ - if (gpu_freq < min_freq) - ia_freq = 800; - else - ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); - ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); - } - - sandybridge_pcode_write(dev_priv, - GEN6_PCODE_WRITE_MIN_FREQ_TABLE, - ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | - ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | - gpu_freq); - } -} - -static int cherryview_rps_max_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rp0; - - val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); - - switch (RUNTIME_INFO(dev_priv)->sseu.eu_total) { - case 8: - /* (2 * 4) config */ - rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); - break; - case 12: - /* (2 * 6) config */ - rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT); - break; - case 16: - /* (2 * 8) config */ - default: - /* Setting (2 * 8) Min RP0 for any other combination */ - rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT); - break; - } - - rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK); - - return rp0; -} - -static int cherryview_rps_rpe_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rpe; - - val = vlv_punit_read(dev_priv, PUNIT_GPU_DUTYCYCLE_REG); - rpe = (val >> PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT) & PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK; - - return rpe; -} - -static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rp1; - - val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); - rp1 = (val & FB_GFX_FREQ_FUSE_MASK); - - return rp1; -} - -static u32 cherryview_rps_min_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rpn; - - val = vlv_punit_read(dev_priv, FB_GFX_FMIN_AT_VMIN_FUSE); - rpn = ((val >> FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT) & - FB_GFX_FREQ_FUSE_MASK); - - return rpn; -} - -static int valleyview_rps_guar_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rp1; - - val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); - - rp1 = (val & FB_GFX_FGUARANTEED_FREQ_FUSE_MASK) >> FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT; - - return rp1; -} - -static int valleyview_rps_max_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rp0; - - val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FREQ_FUSE); - - rp0 = (val & FB_GFX_MAX_FREQ_FUSE_MASK) >> FB_GFX_MAX_FREQ_FUSE_SHIFT; - /* Clamp to max */ - rp0 = min_t(u32, rp0, 0xea); - - return rp0; -} - -static int valleyview_rps_rpe_freq(struct drm_i915_private *dev_priv) -{ - u32 val, rpe; - - val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_LO); - rpe = (val & FB_FMAX_VMIN_FREQ_LO_MASK) >> FB_FMAX_VMIN_FREQ_LO_SHIFT; - val = vlv_nc_read(dev_priv, IOSF_NC_FB_GFX_FMAX_FUSE_HI); - rpe |= (val & FB_FMAX_VMIN_FREQ_HI_MASK) << 5; - - return rpe; -} - -static int valleyview_rps_min_freq(struct drm_i915_private *dev_priv) -{ - u32 val; - - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_LFM) & 0xff; - /* - * According to the BYT Punit GPU turbo HAS 1.1.6.3 the minimum value - * for the minimum frequency in GPLL mode is 0xc1. Contrary to this on - * a BYT-M B0 the above register contains 0xbf. Moreover when setting - * a frequency Punit will not allow values below 0xc0. Clamp it 0xc0 - * to make sure it matches what Punit accepts. - */ - return max_t(u32, val, 0xc0); -} - -/* Check that the pctx buffer wasn't move under us. */ -static void valleyview_check_pctx(struct drm_i915_private *dev_priv) -{ - unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; - - WARN_ON(pctx_addr != dev_priv->dsm.start + - dev_priv->vlv_pctx->stolen->start); -} - - -/* Check that the pcbr address is not empty. 
*/ -static void cherryview_check_pctx(struct drm_i915_private *dev_priv) -{ - unsigned long pctx_addr = I915_READ(VLV_PCBR) & ~4095; - - WARN_ON((pctx_addr >> VLV_PCBR_ADDR_SHIFT) == 0); -} - -static void cherryview_setup_pctx(struct drm_i915_private *dev_priv) -{ - resource_size_t pctx_paddr, paddr; - resource_size_t pctx_size = 32*1024; - u32 pcbr; - - pcbr = I915_READ(VLV_PCBR); - if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) { - DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); - paddr = dev_priv->dsm.end + 1 - pctx_size; - GEM_BUG_ON(paddr > U32_MAX); - - pctx_paddr = (paddr & (~4095)); - I915_WRITE(VLV_PCBR, pctx_paddr); - } - - DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); -} - -static void valleyview_setup_pctx(struct drm_i915_private *dev_priv) -{ - struct drm_i915_gem_object *pctx; - resource_size_t pctx_paddr; - resource_size_t pctx_size = 24*1024; - u32 pcbr; - - pcbr = I915_READ(VLV_PCBR); - if (pcbr) { - /* BIOS set it up already, grab the pre-alloc'd space */ - resource_size_t pcbr_offset; - - pcbr_offset = (pcbr & (~4095)) - dev_priv->dsm.start; - pctx = i915_gem_object_create_stolen_for_preallocated(dev_priv, - pcbr_offset, - I915_GTT_OFFSET_NONE, - pctx_size); - goto out; - } - - DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n"); - - /* - * From the Gunit register HAS: - * The Gfx driver is expected to program this register and ensure - * proper allocation within Gfx stolen memory. For example, this - * register should be programmed such than the PCBR range does not - * overlap with other ranges, such as the frame buffer, protected - * memory, or any other relevant ranges. - */ - pctx = i915_gem_object_create_stolen(dev_priv, pctx_size); - if (!pctx) { - DRM_DEBUG("not enough stolen space for PCTX, disabling\n"); - goto out; - } - - GEM_BUG_ON(range_overflows_t(u64, - dev_priv->dsm.start, - pctx->stolen->start, - U32_MAX)); - pctx_paddr = dev_priv->dsm.start + pctx->stolen->start; - I915_WRITE(VLV_PCBR, pctx_paddr); - -out: - DRM_DEBUG_DRIVER("PCBR: 0x%08x\n", I915_READ(VLV_PCBR)); - dev_priv->vlv_pctx = pctx; -} - -static void valleyview_cleanup_pctx(struct drm_i915_private *dev_priv) -{ - struct drm_i915_gem_object *pctx; - - pctx = fetch_and_zero(&dev_priv->vlv_pctx); - if (pctx) - i915_gem_object_put(pctx); -} - -static void vlv_init_gpll_ref_freq(struct drm_i915_private *dev_priv) -{ - dev_priv->gt_pm.rps.gpll_ref_freq = - vlv_get_cck_clock(dev_priv, "GPLL ref", - CCK_GPLL_CLOCK_CONTROL, - dev_priv->czclk_freq); - - DRM_DEBUG_DRIVER("GPLL reference freq: %d kHz\n", - dev_priv->gt_pm.rps.gpll_ref_freq); -} - -static void valleyview_init_gt_powersave(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 val; - - valleyview_setup_pctx(dev_priv); - - vlv_iosf_sb_get(dev_priv, - BIT(VLV_IOSF_SB_PUNIT) | - BIT(VLV_IOSF_SB_NC) | - BIT(VLV_IOSF_SB_CCK)); - - vlv_init_gpll_ref_freq(dev_priv); - - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); - switch ((val >> 6) & 3) { - case 0: - case 1: - dev_priv->mem_freq = 800; - break; - case 2: - dev_priv->mem_freq = 1066; - break; - case 3: - dev_priv->mem_freq = 1333; - break; - } - DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); - - rps->max_freq = valleyview_rps_max_freq(dev_priv); - rps->rp0_freq = rps->max_freq; - DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->max_freq), - rps->max_freq); - - rps->efficient_freq = valleyview_rps_rpe_freq(dev_priv); - DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", - 
intel_gpu_freq(dev_priv, rps->efficient_freq), - rps->efficient_freq); - - rps->rp1_freq = valleyview_rps_guar_freq(dev_priv); - DRM_DEBUG_DRIVER("RP1(Guar Freq) GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->rp1_freq), - rps->rp1_freq); - - rps->min_freq = valleyview_rps_min_freq(dev_priv); - DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->min_freq), - rps->min_freq); - - vlv_iosf_sb_put(dev_priv, - BIT(VLV_IOSF_SB_PUNIT) | - BIT(VLV_IOSF_SB_NC) | - BIT(VLV_IOSF_SB_CCK)); -} - -static void cherryview_init_gt_powersave(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - u32 val; - - cherryview_setup_pctx(dev_priv); - - vlv_iosf_sb_get(dev_priv, - BIT(VLV_IOSF_SB_PUNIT) | - BIT(VLV_IOSF_SB_NC) | - BIT(VLV_IOSF_SB_CCK)); - - vlv_init_gpll_ref_freq(dev_priv); - - val = vlv_cck_read(dev_priv, CCK_FUSE_REG); - - switch ((val >> 2) & 0x7) { - case 3: - dev_priv->mem_freq = 2000; - break; - default: - dev_priv->mem_freq = 1600; - break; - } - DRM_DEBUG_DRIVER("DDR speed: %d MHz\n", dev_priv->mem_freq); - - rps->max_freq = cherryview_rps_max_freq(dev_priv); - rps->rp0_freq = rps->max_freq; - DRM_DEBUG_DRIVER("max GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->max_freq), - rps->max_freq); - - rps->efficient_freq = cherryview_rps_rpe_freq(dev_priv); - DRM_DEBUG_DRIVER("RPe GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->efficient_freq), - rps->efficient_freq); - - rps->rp1_freq = cherryview_rps_guar_freq(dev_priv); - DRM_DEBUG_DRIVER("RP1(Guar) GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->rp1_freq), - rps->rp1_freq); - - rps->min_freq = cherryview_rps_min_freq(dev_priv); - DRM_DEBUG_DRIVER("min GPU freq: %d MHz (%u)\n", - intel_gpu_freq(dev_priv, rps->min_freq), - rps->min_freq); - - vlv_iosf_sb_put(dev_priv, - BIT(VLV_IOSF_SB_PUNIT) | - BIT(VLV_IOSF_SB_NC) | - BIT(VLV_IOSF_SB_CCK)); - - WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq | - rps->min_freq) & 1, - "Odd GPU freq values\n"); -} - -static void valleyview_cleanup_gt_powersave(struct drm_i915_private *dev_priv) -{ - valleyview_cleanup_pctx(dev_priv); -} - -static void cherryview_enable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 gtfifodbg, rc6_mode, pcbr; - - gtfifodbg = I915_READ(GTFIFODBG) & ~(GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV | - GT_FIFO_FREE_ENTRIES_CHV); - if (gtfifodbg) { - DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", - gtfifodbg); - I915_WRITE(GTFIFODBG, gtfifodbg); - } - - cherryview_check_pctx(dev_priv); - - /* 1a & 1b: Get forcewake during program sequence. Although the driver - * hasn't enabled a state yet where we need forcewake, BIOS may have.*/ - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* Disable RC states. 
*/ - I915_WRITE(GEN6_RC_CONTROL, 0); - - /* 2a: Program RC6 thresholds.*/ - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16); - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); /* 12500 * 1280ns */ - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); /* 25 * 1280ns */ - - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - I915_WRITE(GEN6_RC_SLEEP, 0); - - /* TO threshold set to 500 us ( 0x186 * 1.28 us) */ - I915_WRITE(GEN6_RC6_THRESHOLD, 0x186); - - /* Allows RC6 residency counter to work */ - I915_WRITE(VLV_COUNTER_CONTROL, - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | - VLV_MEDIA_RC6_COUNT_EN | - VLV_RENDER_RC6_COUNT_EN)); - - /* For now we assume BIOS is allocating and populating the PCBR */ - pcbr = I915_READ(VLV_PCBR); - - /* 3: Enable RC6 */ - rc6_mode = 0; - if (pcbr >> VLV_PCBR_ADDR_SHIFT) - rc6_mode = GEN7_RC_CTL_TO_MODE; - I915_WRITE(GEN6_RC_CONTROL, rc6_mode); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void cherryview_enable_rps(struct drm_i915_private *dev_priv) -{ - u32 val; - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* 1: Program defaults and thresholds for RPS*/ - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); - I915_WRITE(GEN6_RP_UP_EI, 66000); - I915_WRITE(GEN6_RP_DOWN_EI, 350000); - - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); - - /* 2: Enable RPS */ - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_AVG); - - /* Setting Fixed Bias */ - vlv_punit_get(dev_priv); - - val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | CHV_BIAS_CPU_50_SOC_50; - vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); - - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); - - vlv_punit_put(dev_priv); - - /* RPS code assumes GPLL is used */ - WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); - - DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); - DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); - - reset_rps(dev_priv, valleyview_set_rps); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void valleyview_enable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_engine_cs *engine; - enum intel_engine_id id; - u32 gtfifodbg; - - valleyview_check_pctx(dev_priv); - - gtfifodbg = I915_READ(GTFIFODBG); - if (gtfifodbg) { - DRM_DEBUG_DRIVER("GT fifo had a previous error %x\n", - gtfifodbg); - I915_WRITE(GTFIFODBG, gtfifodbg); - } - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - /* Disable RC states. 
*/ - I915_WRITE(GEN6_RC_CONTROL, 0); - - I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 0x00280000); - I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); - I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); - - for_each_engine(engine, dev_priv, id) - I915_WRITE(RING_MAX_IDLE(engine->mmio_base), 10); - - I915_WRITE(GEN6_RC6_THRESHOLD, 0x557); - - /* Allows RC6 residency counter to work */ - I915_WRITE(VLV_COUNTER_CONTROL, - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH | - VLV_MEDIA_RC0_COUNT_EN | - VLV_RENDER_RC0_COUNT_EN | - VLV_MEDIA_RC6_COUNT_EN | - VLV_RENDER_RC6_COUNT_EN)); - - I915_WRITE(GEN6_RC_CONTROL, - GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static void valleyview_enable_rps(struct drm_i915_private *dev_priv) -{ - u32 val; - - intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); - - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 59400); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 245000); - I915_WRITE(GEN6_RP_UP_EI, 66000); - I915_WRITE(GEN6_RP_DOWN_EI, 350000); - - I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); - - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_CONT); - - vlv_punit_get(dev_priv); - - /* Setting Fixed Bias */ - val = VLV_OVERRIDE_EN | VLV_SOC_TDP_EN | VLV_BIAS_CPU_125_SOC_875; - vlv_punit_write(dev_priv, VLV_TURBO_SOC_OVERRIDE, val); - - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); - - vlv_punit_put(dev_priv); - - /* RPS code assumes GPLL is used */ - WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n"); - - DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE)); - DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val); - - reset_rps(dev_priv, valleyview_set_rps); - - intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); -} - -static unsigned long intel_pxfreq(u32 vidfreq) -{ - unsigned long freq; - int div = (vidfreq & 0x3f0000) >> 16; - int post = (vidfreq & 0x3000) >> 12; - int pre = (vidfreq & 0x7); - - if (!pre) - return 0; - - freq = ((div * 133333) / ((1<<post) * pre)); - - return freq; -} - -static const struct cparams { - u16 i; - u16 t; - u16 m; - u16 c; -} cparams[] = { - { 1, 1333, 301, 28664 }, - { 1, 1066, 294, 24460 }, - { 1, 800, 294, 25192 }, - { 0, 1333, 276, 27605 }, - { 0, 1066, 276, 27605 }, - { 0, 800, 231, 23784 }, -}; - -static unsigned long __i915_chipset_val(struct drm_i915_private *dev_priv) -{ - u64 total_count, diff, ret; - u32 count1, count2, count3, m = 0, c = 0; - unsigned long now = jiffies_to_msecs(jiffies), diff1; - int i; - - lockdep_assert_held(&mchdev_lock); - - diff1 = now - dev_priv->ips.last_time1; - - /* Prevent division-by-zero if we are asking too fast. - * Also, we don't get interesting results if we are polling - * faster than once in 10ms, so just return the saved value - * in such cases. 
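
A worked example of the PXVFREQ decode performed by the removed intel_pxfreq() above, using a hypothetical register value (div = 5, post = 2, pre = 1); everything except the field layout taken from the code is made up for illustration.

	#include <assert.h>
	#include <stdint.h>

	static unsigned long pxfreq_khz(uint32_t vidfreq)
	{
		int div = (vidfreq & 0x3f0000) >> 16;
		int post = (vidfreq & 0x3000) >> 12;
		int pre = vidfreq & 0x7;

		return pre ? (div * 133333) / ((1 << post) * pre) : 0;
	}

	int main(void)
	{
		/* div = 5, post = 2, pre = 1: (5 * 133333) / (4 * 1) = 166666 kHz */
		assert(pxfreq_khz((5u << 16) | (2u << 12) | 1u) == 166666);
		return 0;
	}
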
- */ - if (diff1 <= 10) - return dev_priv->ips.chipset_power; - - count1 = I915_READ(DMIEC); - count2 = I915_READ(DDREC); - count3 = I915_READ(CSIEC); - - total_count = count1 + count2 + count3; - - /* FIXME: handle per-counter overflow */ - if (total_count < dev_priv->ips.last_count1) { - diff = ~0UL - dev_priv->ips.last_count1; - diff += total_count; - } else { - diff = total_count - dev_priv->ips.last_count1; - } - - for (i = 0; i < ARRAY_SIZE(cparams); i++) { - if (cparams[i].i == dev_priv->ips.c_m && - cparams[i].t == dev_priv->ips.r_t) { - m = cparams[i].m; - c = cparams[i].c; - break; - } - } - - diff = div_u64(diff, diff1); - ret = ((m * diff) + c); - ret = div_u64(ret, 10); - - dev_priv->ips.last_count1 = total_count; - dev_priv->ips.last_time1 = now; - - dev_priv->ips.chipset_power = ret; - - return ret; -} - -unsigned long i915_chipset_val(struct drm_i915_private *dev_priv) -{ - intel_wakeref_t wakeref; - unsigned long val = 0; - - if (!IS_GEN(dev_priv, 5)) - return 0; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - spin_lock_irq(&mchdev_lock); - val = __i915_chipset_val(dev_priv); - spin_unlock_irq(&mchdev_lock); - } - - return val; -} - -unsigned long i915_mch_val(struct drm_i915_private *i915) -{ - unsigned long m, x, b; - u32 tsfs; - - tsfs = intel_uncore_read(&i915->uncore, TSFS); - - m = ((tsfs & TSFS_SLOPE_MASK) >> TSFS_SLOPE_SHIFT); - x = intel_uncore_read8(&i915->uncore, TR1); - - b = tsfs & TSFS_INTR_MASK; - - return ((m * x) / 127) - b; -} - -static int _pxvid_to_vd(u8 pxvid) -{ - if (pxvid == 0) - return 0; - - if (pxvid >= 8 && pxvid < 31) - pxvid = 31; - - return (pxvid + 2) * 125; -} - -static u32 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid) -{ - const int vd = _pxvid_to_vd(pxvid); - const int vm = vd - 1125; - - if (INTEL_INFO(dev_priv)->is_mobile) - return vm > 0 ? vm : 0; - - return vd; -} - -static void __i915_update_gfx_val(struct drm_i915_private *dev_priv) -{ - u64 now, diff, diffms; - u32 count; - - lockdep_assert_held(&mchdev_lock); - - now = ktime_get_raw_ns(); - diffms = now - dev_priv->ips.last_time2; - do_div(diffms, NSEC_PER_MSEC); - - /* Don't divide by 0 */ - if (!diffms) - return; - - count = I915_READ(GFXEC); - - if (count < dev_priv->ips.last_count2) { - diff = ~0UL - dev_priv->ips.last_count2; - diff += count; - } else { - diff = count - dev_priv->ips.last_count2; - } - - dev_priv->ips.last_count2 = count; - dev_priv->ips.last_time2 = now; - - /* More magic constants... 
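
The arithmetic in the removed __i915_chipset_val() above reduces to a short expression once an (m, c) pair has been selected from cparams[] for the board's memory configuration. A minimal sketch with hypothetical names, assuming the caller has already summed the DMIEC/DDREC/CSIEC delta and measured the elapsed milliseconds:

	#include <stdint.h>

	/* Sketch only: mirrors ret = ((m * diff) + c) / 10 from the code above,
	 * where diff is the counter delta divided by the elapsed milliseconds. */
	static uint64_t chipset_power_est(uint64_t count_delta, uint64_t elapsed_ms,
					  uint16_t m, uint16_t c)
	{
		uint64_t rate = count_delta / elapsed_ms;	/* caller ensures elapsed_ms > 10 */

		return (m * rate + c) / 10;
	}
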
*/ - diff = diff * 1181; - diff = div_u64(diff, diffms * 10); - dev_priv->ips.gfx_power = diff; -} - -void i915_update_gfx_val(struct drm_i915_private *dev_priv) -{ - intel_wakeref_t wakeref; - - if (!IS_GEN(dev_priv, 5)) - return; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - spin_lock_irq(&mchdev_lock); - __i915_update_gfx_val(dev_priv); - spin_unlock_irq(&mchdev_lock); - } -} - -static unsigned long __i915_gfx_val(struct drm_i915_private *dev_priv) -{ - unsigned long t, corr, state1, corr2, state2; - u32 pxvid, ext_v; - - lockdep_assert_held(&mchdev_lock); - - pxvid = I915_READ(PXVFREQ(dev_priv->gt_pm.rps.cur_freq)); - pxvid = (pxvid >> 24) & 0x7f; - ext_v = pvid_to_extvid(dev_priv, pxvid); - - state1 = ext_v; - - t = i915_mch_val(dev_priv); - - /* Revel in the empirically derived constants */ - - /* Correction factor in 1/100000 units */ - if (t > 80) - corr = ((t * 2349) + 135940); - else if (t >= 50) - corr = ((t * 964) + 29317); - else /* < 50 */ - corr = ((t * 301) + 1004); - - corr = corr * ((150142 * state1) / 10000 - 78642); - corr /= 100000; - corr2 = (corr * dev_priv->ips.corr); - - state2 = (corr2 * state1) / 10000; - state2 /= 100; /* convert to mW */ - - __i915_update_gfx_val(dev_priv); - - return dev_priv->ips.gfx_power + state2; -} - -unsigned long i915_gfx_val(struct drm_i915_private *dev_priv) -{ - intel_wakeref_t wakeref; - unsigned long val = 0; - - if (!IS_GEN(dev_priv, 5)) - return 0; - - with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) { - spin_lock_irq(&mchdev_lock); - val = __i915_gfx_val(dev_priv); - spin_unlock_irq(&mchdev_lock); - } - - return val; -} - -static struct drm_i915_private __rcu *i915_mch_dev; - -static struct drm_i915_private *mchdev_get(void) -{ - struct drm_i915_private *i915; - - rcu_read_lock(); - i915 = rcu_dereference(i915_mch_dev); - if (!kref_get_unless_zero(&i915->drm.ref)) - i915 = NULL; - rcu_read_unlock(); - - return i915; -} - -/** - * i915_read_mch_val - return value for IPS use - * - * Calculate and return a value for the IPS driver to use when deciding whether - * we have thermal and power headroom to increase CPU or GPU power budget. - */ -unsigned long i915_read_mch_val(void) -{ - struct drm_i915_private *i915; - unsigned long chipset_val = 0; - unsigned long graphics_val = 0; - intel_wakeref_t wakeref; - - i915 = mchdev_get(); - if (!i915) - return 0; - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - spin_lock_irq(&mchdev_lock); - chipset_val = __i915_chipset_val(i915); - graphics_val = __i915_gfx_val(i915); - spin_unlock_irq(&mchdev_lock); - } - - drm_dev_put(&i915->drm); - return chipset_val + graphics_val; -} -EXPORT_SYMBOL_GPL(i915_read_mch_val); - -/** - * i915_gpu_raise - raise GPU frequency limit - * - * Raise the limit; IPS indicates we have thermal headroom. - */ -bool i915_gpu_raise(void) -{ - struct drm_i915_private *i915; - - i915 = mchdev_get(); - if (!i915) - return false; - - spin_lock_irq(&mchdev_lock); - if (i915->ips.max_delay > i915->ips.fmax) - i915->ips.max_delay--; - spin_unlock_irq(&mchdev_lock); - - drm_dev_put(&i915->drm); - return true; -} -EXPORT_SYMBOL_GPL(i915_gpu_raise); - -/** - * i915_gpu_lower - lower GPU frequency limit - * - * IPS indicates we're close to a thermal limit, so throttle back the GPU - * frequency maximum. 
- */ -bool i915_gpu_lower(void) -{ - struct drm_i915_private *i915; - - i915 = mchdev_get(); - if (!i915) - return false; - - spin_lock_irq(&mchdev_lock); - if (i915->ips.max_delay < i915->ips.min_delay) - i915->ips.max_delay++; - spin_unlock_irq(&mchdev_lock); - - drm_dev_put(&i915->drm); - return true; -} -EXPORT_SYMBOL_GPL(i915_gpu_lower); - -/** - * i915_gpu_busy - indicate GPU business to IPS - * - * Tell the IPS driver whether or not the GPU is busy. - */ -bool i915_gpu_busy(void) -{ - struct drm_i915_private *i915; - bool ret; - - i915 = mchdev_get(); - if (!i915) - return false; - - ret = i915->gt.awake; - - drm_dev_put(&i915->drm); - return ret; -} -EXPORT_SYMBOL_GPL(i915_gpu_busy); - -/** - * i915_gpu_turbo_disable - disable graphics turbo - * - * Disable graphics turbo by resetting the max frequency and setting the - * current frequency to the default. - */ -bool i915_gpu_turbo_disable(void) -{ - struct drm_i915_private *i915; - bool ret; - - i915 = mchdev_get(); - if (!i915) - return false; - - spin_lock_irq(&mchdev_lock); - i915->ips.max_delay = i915->ips.fstart; - ret = ironlake_set_drps(i915, i915->ips.fstart); - spin_unlock_irq(&mchdev_lock); - - drm_dev_put(&i915->drm); - return ret; -} -EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable); - -/** - * Tells the intel_ips driver that the i915 driver is now loaded, if - * IPS got loaded first. - * - * This awkward dance is so that neither module has to depend on the - * other in order for IPS to do the appropriate communication of - * GPU turbo limits to i915. - */ -static void -ips_ping_for_i915_load(void) -{ - void (*link)(void); - - link = symbol_get(ips_link_to_i915_driver); - if (link) { - link(); - symbol_put(ips_link_to_i915_driver); - } -} - -void intel_gpu_ips_init(struct drm_i915_private *dev_priv) -{ - /* We only register the i915 ips part with intel-ips once everything is - * set up, to avoid intel-ips sneaking in and reading bogus values. 
*/ - rcu_assign_pointer(i915_mch_dev, dev_priv); - - ips_ping_for_i915_load(); -} - -void intel_gpu_ips_teardown(void) -{ - rcu_assign_pointer(i915_mch_dev, NULL); -} - -static void intel_init_emon(struct drm_i915_private *dev_priv) -{ - u32 lcfuse; - u8 pxw[16]; - int i; - - /* Disable to program */ - I915_WRITE(ECR, 0); - POSTING_READ(ECR); - - /* Program energy weights for various events */ - I915_WRITE(SDEW, 0x15040d00); - I915_WRITE(CSIEW0, 0x007f0000); - I915_WRITE(CSIEW1, 0x1e220004); - I915_WRITE(CSIEW2, 0x04000004); - - for (i = 0; i < 5; i++) - I915_WRITE(PEW(i), 0); - for (i = 0; i < 3; i++) - I915_WRITE(DEW(i), 0); - - /* Program P-state weights to account for frequency power adjustment */ - for (i = 0; i < 16; i++) { - u32 pxvidfreq = I915_READ(PXVFREQ(i)); - unsigned long freq = intel_pxfreq(pxvidfreq); - unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> - PXVFREQ_PX_SHIFT; - unsigned long val; - - val = vid * vid; - val *= (freq / 1000); - val *= 255; - val /= (127*127*900); - if (val > 0xff) - DRM_ERROR("bad pxval: %ld\n", val); - pxw[i] = val; - } - /* Render standby states get 0 weight */ - pxw[14] = 0; - pxw[15] = 0; - - for (i = 0; i < 4; i++) { - u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | - (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); - I915_WRITE(PXW(i), val); - } - - /* Adjust magic regs to magic values (more experimental results) */ - I915_WRITE(OGW0, 0); - I915_WRITE(OGW1, 0); - I915_WRITE(EG0, 0x00007f00); - I915_WRITE(EG1, 0x0000000e); - I915_WRITE(EG2, 0x000e0000); - I915_WRITE(EG3, 0x68000300); - I915_WRITE(EG4, 0x42000000); - I915_WRITE(EG5, 0x00140031); - I915_WRITE(EG6, 0); - I915_WRITE(EG7, 0); - - for (i = 0; i < 8; i++) - I915_WRITE(PXWL(i), 0); - - /* Enable PMON + select events */ - I915_WRITE(ECR, 0x80000019); - - lcfuse = I915_READ(LCFUSE02); - - dev_priv->ips.corr = (lcfuse & LCFUSE_HIV_MASK); -} - -static bool i915_rc6_ctx_corrupted(struct drm_i915_private *dev_priv) -{ - return !I915_READ(GEN8_RC6_CTX_INFO); -} - -static void i915_rc6_ctx_wa_init(struct drm_i915_private *i915) -{ - if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) - return; - - if (i915_rc6_ctx_corrupted(i915)) { - DRM_INFO("RC6 context corrupted, disabling runtime power management\n"); - i915->gt_pm.rc6.ctx_corrupted = true; - i915->gt_pm.rc6.ctx_corrupted_wakeref = - intel_runtime_pm_get(&i915->runtime_pm); - } -} - -static void i915_rc6_ctx_wa_cleanup(struct drm_i915_private *i915) -{ - if (i915->gt_pm.rc6.ctx_corrupted) { - intel_runtime_pm_put(&i915->runtime_pm, - i915->gt_pm.rc6.ctx_corrupted_wakeref); - i915->gt_pm.rc6.ctx_corrupted = false; - } -} - -/** - * i915_rc6_ctx_wa_suspend - system suspend sequence for the RC6 CTX WA - * @i915: i915 device - * - * Perform any steps needed to clean up the RC6 CTX WA before system suspend. - */ -void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915) -{ - if (i915->gt_pm.rc6.ctx_corrupted) - intel_runtime_pm_put(&i915->runtime_pm, - i915->gt_pm.rc6.ctx_corrupted_wakeref); -} - -/** - * i915_rc6_ctx_wa_resume - system resume sequence for the RC6 CTX WA - * @i915: i915 device - * - * Perform any steps needed to re-init the RC6 CTX WA after system resume. 
- */ -void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915) -{ - if (!i915->gt_pm.rc6.ctx_corrupted) - return; - - if (i915_rc6_ctx_corrupted(i915)) { - i915->gt_pm.rc6.ctx_corrupted_wakeref = - intel_runtime_pm_get(&i915->runtime_pm); - return; - } - - DRM_INFO("RC6 context restored, re-enabling runtime power management\n"); - i915->gt_pm.rc6.ctx_corrupted = false; -} - -static void intel_disable_rc6(struct drm_i915_private *dev_priv); - -/** - * i915_rc6_ctx_wa_check - check for a new RC6 CTX corruption - * @i915: i915 device - * - * Check if an RC6 CTX corruption has happened since the last check and if so - * disable RC6 and runtime power management. - * - * Return false if no context corruption has happened since the last call of - * this function, true otherwise. -*/ -bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915) -{ - if (!NEEDS_RC6_CTX_CORRUPTION_WA(i915)) - return false; - - if (i915->gt_pm.rc6.ctx_corrupted) - return false; - - if (!i915_rc6_ctx_corrupted(i915)) - return false; - - DRM_NOTE("RC6 context corruption, disabling runtime power management\n"); - - intel_disable_rc6(i915); - i915->gt_pm.rc6.ctx_corrupted = true; - i915->gt_pm.rc6.ctx_corrupted_wakeref = - intel_runtime_pm_get_noresume(&i915->runtime_pm); - - return true; -} - -void intel_init_gt_powersave(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* - * RPM depends on RC6 to save restore the GT HW context, so make RC6 a - * requirement. - */ - if (!sanitize_rc6(dev_priv)) { - DRM_INFO("RC6 disabled, disabling runtime PM support\n"); - pm_runtime_get(&dev_priv->drm.pdev->dev); - } - - i915_rc6_ctx_wa_init(dev_priv); - - /* Initialize RPS limits (for userspace) */ - if (IS_CHERRYVIEW(dev_priv)) - cherryview_init_gt_powersave(dev_priv); - else if (IS_VALLEYVIEW(dev_priv)) - valleyview_init_gt_powersave(dev_priv); - else if (INTEL_GEN(dev_priv) >= 6) - gen6_init_rps_frequencies(dev_priv); - - /* Derive initial user preferences/limits from the hardware limits */ - rps->max_freq_softlimit = rps->max_freq; - rps->min_freq_softlimit = rps->min_freq; - - /* After setting max-softlimit, find the overclock max freq */ - if (IS_GEN(dev_priv, 6) || - IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv)) { - u32 params = 0; - - sandybridge_pcode_read(dev_priv, GEN6_READ_OC_PARAMS, - &params, NULL); - if (params & BIT(31)) { /* OC supported */ - DRM_DEBUG_DRIVER("Overclocking supported, max: %dMHz, overclock: %dMHz\n", - (rps->max_freq & 0xff) * 50, - (params & 0xff) * 50); - rps->max_freq = params & 0xff; - } - } - - /* Finally allow us to boost to max by default */ - rps->boost_freq = rps->max_freq; - rps->idle_freq = rps->min_freq; - rps->cur_freq = rps->idle_freq; -} - -void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv) -{ - if (IS_VALLEYVIEW(dev_priv)) - valleyview_cleanup_gt_powersave(dev_priv); - - i915_rc6_ctx_wa_cleanup(dev_priv); - - if (!HAS_RC6(dev_priv)) - pm_runtime_put(&dev_priv->drm.pdev->dev); -} - -void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv) -{ - dev_priv->gt_pm.rps.enabled = true; /* force RPS disabling */ - dev_priv->gt_pm.rc6.enabled = true; /* force RC6 disabling */ - intel_disable_gt_powersave(dev_priv); - - if (INTEL_GEN(dev_priv) >= 11) - gen11_reset_rps_interrupts(dev_priv); - else if (INTEL_GEN(dev_priv) >= 6) - gen6_reset_rps_interrupts(dev_priv); -} - -static inline void intel_disable_llc_pstate(struct drm_i915_private *i915) -{ - lockdep_assert_held(&i915->gt_pm.rps.lock); - - if (!i915->gt_pm.llc_pstate.enabled)
- return; - - /* Currently there is no HW configuration to be done to disable. */ - - i915->gt_pm.llc_pstate.enabled = false; -} - -static void __intel_disable_rc6(struct drm_i915_private *dev_priv) -{ - lockdep_assert_held(&dev_priv->gt_pm.rps.lock); - - if (!dev_priv->gt_pm.rc6.enabled) - return; - - if (INTEL_GEN(dev_priv) >= 9) - gen9_disable_rc6(dev_priv); - else if (IS_CHERRYVIEW(dev_priv)) - cherryview_disable_rc6(dev_priv); - else if (IS_VALLEYVIEW(dev_priv)) - valleyview_disable_rc6(dev_priv); - else if (INTEL_GEN(dev_priv) >= 6) - gen6_disable_rc6(dev_priv); - - dev_priv->gt_pm.rc6.enabled = false; -} - -static void intel_disable_rc6(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - mutex_lock(&rps->lock); - __intel_disable_rc6(dev_priv); - mutex_unlock(&rps->lock); -} - -static void intel_disable_rps(struct drm_i915_private *dev_priv) -{ - lockdep_assert_held(&dev_priv->gt_pm.rps.lock); - - if (!dev_priv->gt_pm.rps.enabled) - return; - - if (INTEL_GEN(dev_priv) >= 9) - gen9_disable_rps(dev_priv); - else if (IS_CHERRYVIEW(dev_priv)) - cherryview_disable_rps(dev_priv); - else if (IS_VALLEYVIEW(dev_priv)) - valleyview_disable_rps(dev_priv); - else if (INTEL_GEN(dev_priv) >= 6) - gen6_disable_rps(dev_priv); - else if (IS_IRONLAKE_M(dev_priv)) - ironlake_disable_drps(dev_priv); - - dev_priv->gt_pm.rps.enabled = false; -} - -void intel_disable_gt_powersave(struct drm_i915_private *dev_priv) -{ - mutex_lock(&dev_priv->gt_pm.rps.lock); - - __intel_disable_rc6(dev_priv); - intel_disable_rps(dev_priv); - if (HAS_LLC(dev_priv)) - intel_disable_llc_pstate(dev_priv); - - mutex_unlock(&dev_priv->gt_pm.rps.lock); -} - -static inline void intel_enable_llc_pstate(struct drm_i915_private *i915) -{ - lockdep_assert_held(&i915->gt_pm.rps.lock); - - if (i915->gt_pm.llc_pstate.enabled) - return; - - gen6_update_ring_freq(i915); - - i915->gt_pm.llc_pstate.enabled = true; -} - -static void intel_enable_rc6(struct drm_i915_private *dev_priv) -{ - lockdep_assert_held(&dev_priv->gt_pm.rps.lock); - - if (dev_priv->gt_pm.rc6.enabled) - return; - - if (dev_priv->gt_pm.rc6.ctx_corrupted) - return; - - if (IS_CHERRYVIEW(dev_priv)) - cherryview_enable_rc6(dev_priv); - else if (IS_VALLEYVIEW(dev_priv)) - valleyview_enable_rc6(dev_priv); - else if (INTEL_GEN(dev_priv) >= 11) - gen11_enable_rc6(dev_priv); - else if (INTEL_GEN(dev_priv) >= 9) - gen9_enable_rc6(dev_priv); - else if (IS_BROADWELL(dev_priv)) - gen8_enable_rc6(dev_priv); - else if (INTEL_GEN(dev_priv) >= 6) - gen6_enable_rc6(dev_priv); - - dev_priv->gt_pm.rc6.enabled = true; -} - -static void intel_enable_rps(struct drm_i915_private *dev_priv) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - lockdep_assert_held(&rps->lock); - - if (rps->enabled) - return; - - if (IS_CHERRYVIEW(dev_priv)) { - cherryview_enable_rps(dev_priv); - } else if (IS_VALLEYVIEW(dev_priv)) { - valleyview_enable_rps(dev_priv); - } else if (INTEL_GEN(dev_priv) >= 9) { - gen9_enable_rps(dev_priv); - } else if (IS_BROADWELL(dev_priv)) { - gen8_enable_rps(dev_priv); - } else if (INTEL_GEN(dev_priv) >= 6) { - gen6_enable_rps(dev_priv); - } else if (IS_IRONLAKE_M(dev_priv)) { - ironlake_enable_drps(dev_priv); - intel_init_emon(dev_priv); - } - - WARN_ON(rps->max_freq < rps->min_freq); - WARN_ON(rps->idle_freq > rps->max_freq); - - WARN_ON(rps->efficient_freq < rps->min_freq); - WARN_ON(rps->efficient_freq > rps->max_freq); - - rps->enabled = true; -} - -void intel_enable_gt_powersave(struct drm_i915_private *dev_priv) -{ - /* Powersaving is 
controlled by the host when inside a VM */ - if (intel_vgpu_active(dev_priv)) - return; - - mutex_lock(&dev_priv->gt_pm.rps.lock); - - if (HAS_RC6(dev_priv)) - intel_enable_rc6(dev_priv); - if (HAS_RPS(dev_priv)) - intel_enable_rps(dev_priv); - if (HAS_LLC(dev_priv)) - intel_enable_llc_pstate(dev_priv); - - mutex_unlock(&dev_priv->gt_pm.rps.lock); -} - static void ibx_init_clock_gating(struct drm_i915_private *dev_priv) { /* @@ -8976,7 +6347,7 @@ static void ilk_init_clock_gating(struct drm_i915_private *dev_priv) static void cpt_init_clock_gating(struct drm_i915_private *dev_priv) { - int pipe; + enum pipe pipe; u32 val; /* @@ -9196,6 +6567,22 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv) _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE)); } +static void tgl_init_clock_gating(struct drm_i915_private *dev_priv) +{ + u32 vd_pg_enable = 0; + unsigned int i; + + /* This is not a WA. Enable VD HCP & MFX_ENC powergate */ + for (i = 0; i < I915_MAX_VCS; i++) { + if (HAS_ENGINE(dev_priv, _VCS(i))) + vd_pg_enable |= VDN_HCP_POWERGATE_ENABLE(i) | + VDN_MFX_POWERGATE_ENABLE(i); + } + + I915_WRITE(POWERGATE_ENABLE, + I915_READ(POWERGATE_ENABLE) | vd_pg_enable); +} + static void cnp_init_clock_gating(struct drm_i915_private *dev_priv) { if (!HAS_PCH_CNP(dev_priv)) @@ -9716,7 +7103,7 @@ static void nop_init_clock_gating(struct drm_i915_private *dev_priv) void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv) { if (IS_GEN(dev_priv, 12)) - dev_priv->display.init_clock_gating = nop_init_clock_gating; + dev_priv->display.init_clock_gating = tgl_init_clock_gating; else if (IS_GEN(dev_priv, 11)) dev_priv->display.init_clock_gating = icl_init_clock_gating; else if (IS_CANNONLAKE(dev_priv)) @@ -9772,6 +7159,9 @@ void intel_init_pm(struct drm_i915_private *dev_priv) else if (IS_GEN(dev_priv, 5)) i915_ironlake_get_mem_freq(dev_priv); + if (intel_has_sagv(dev_priv)) + skl_setup_sagv_block_time(dev_priv); + /* For FIFO watermark updates */ if (INTEL_GEN(dev_priv) >= 9) { skl_setup_wm_latency(dev_priv); @@ -9830,7 +7220,7 @@ void intel_init_pm(struct drm_i915_private *dev_priv) dev_priv->display.update_wm = i9xx_update_wm; dev_priv->display.get_fifo_size = i9xx_get_fifo_size; } else if (IS_GEN(dev_priv, 2)) { - if (INTEL_INFO(dev_priv)->num_pipes == 1) { + if (INTEL_NUM_PIPES(dev_priv) == 1) { dev_priv->display.update_wm = i845_update_wm; dev_priv->display.get_fifo_size = i845_get_fifo_size; } else { @@ -9842,217 +7232,8 @@ void intel_init_pm(struct drm_i915_private *dev_priv) } } -static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* - * N = val - 0xb7 - * Slow = Fast = GPLL ref * N - */ - return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * (val - 0xb7), 1000); -} - -static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - return DIV_ROUND_CLOSEST(1000 * val, rps->gpll_ref_freq) + 0xb7; -} - -static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* - * N = val / 2 - * CU (slow) = CU2x (fast) / 2 = GPLL ref * N / 2 - */ - return DIV_ROUND_CLOSEST(rps->gpll_ref_freq * val, 2 * 2 * 1000); -} - -static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) -{ - struct intel_rps *rps = &dev_priv->gt_pm.rps; - - /* CHV needs even values */ - return DIV_ROUND_CLOSEST(2 * 1000 * val, rps->gpll_ref_freq) * 2; -} - -int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) 
-{ - if (INTEL_GEN(dev_priv) >= 9) - return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER, - GEN9_FREQ_SCALER); - else if (IS_CHERRYVIEW(dev_priv)) - return chv_gpu_freq(dev_priv, val); - else if (IS_VALLEYVIEW(dev_priv)) - return byt_gpu_freq(dev_priv, val); - else - return val * GT_FREQUENCY_MULTIPLIER; -} - -int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) -{ - if (INTEL_GEN(dev_priv) >= 9) - return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER, - GT_FREQUENCY_MULTIPLIER); - else if (IS_CHERRYVIEW(dev_priv)) - return chv_freq_opcode(dev_priv, val); - else if (IS_VALLEYVIEW(dev_priv)) - return byt_freq_opcode(dev_priv, val); - else - return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER); -} - void intel_pm_setup(struct drm_i915_private *dev_priv) { - mutex_init(&dev_priv->gt_pm.rps.lock); - mutex_init(&dev_priv->gt_pm.rps.power.mutex); - - atomic_set(&dev_priv->gt_pm.rps.num_waiters, 0); - dev_priv->runtime_pm.suspended = false; atomic_set(&dev_priv->runtime_pm.wakeref_count, 0); } - -static u64 vlv_residency_raw(struct drm_i915_private *dev_priv, - const i915_reg_t reg) -{ - u32 lower, upper, tmp; - int loop = 2; - - /* - * The register accessed do not need forcewake. We borrow - * uncore lock to prevent concurrent access to range reg. - */ - lockdep_assert_held(&dev_priv->uncore.lock); - - /* - * vlv and chv residency counters are 40 bits in width. - * With a control bit, we can choose between upper or lower - * 32bit window into this counter. - * - * Although we always use the counter in high-range mode elsewhere, - * userspace may attempt to read the value before rc6 is initialised, - * before we have set the default VLV_COUNTER_CONTROL value. So always - * set the high bit to be safe. - */ - I915_WRITE_FW(VLV_COUNTER_CONTROL, - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); - upper = I915_READ_FW(reg); - do { - tmp = upper; - - I915_WRITE_FW(VLV_COUNTER_CONTROL, - _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH)); - lower = I915_READ_FW(reg); - - I915_WRITE_FW(VLV_COUNTER_CONTROL, - _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH)); - upper = I915_READ_FW(reg); - } while (upper != tmp && --loop); - - /* - * Everywhere else we always use VLV_COUNTER_CONTROL with the - * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set - * now. - */ - - return lower | (u64)upper << 8; -} - -u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, - const i915_reg_t reg) -{ - struct intel_uncore *uncore = &dev_priv->uncore; - u64 time_hw, prev_hw, overflow_hw; - unsigned int fw_domains; - unsigned long flags; - unsigned int i; - u32 mul, div; - - if (!HAS_RC6(dev_priv)) - return 0; - - /* - * Store previous hw counter values for counter wrap-around handling. - * - * There are only four interesting registers and they live next to each - * other so we can use the relative address, compared to the smallest - * one as the index into driver storage. 
- */ - i = (i915_mmio_reg_offset(reg) - - i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32); - if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency))) - return 0; - - fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ); - - spin_lock_irqsave(&uncore->lock, flags); - intel_uncore_forcewake_get__locked(uncore, fw_domains); - - /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { - mul = 1000000; - div = dev_priv->czclk_freq; - overflow_hw = BIT_ULL(40); - time_hw = vlv_residency_raw(dev_priv, reg); - } else { - /* 833.33ns units on Gen9LP, 1.28us elsewhere. */ - if (IS_GEN9_LP(dev_priv)) { - mul = 10000; - div = 12; - } else { - mul = 1280; - div = 1; - } - - overflow_hw = BIT_ULL(32); - time_hw = intel_uncore_read_fw(uncore, reg); - } - - /* - * Counter wrap handling. - * - * But relying on a sufficient frequency of queries otherwise counters - * can still wrap. - */ - prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i]; - dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw; - - /* RC6 delta from last sample. */ - if (time_hw >= prev_hw) - time_hw -= prev_hw; - else - time_hw += overflow_hw - prev_hw; - - /* Add delta to RC6 extended raw driver copy. */ - time_hw += dev_priv->gt_pm.rc6.cur_residency[i]; - dev_priv->gt_pm.rc6.cur_residency[i] = time_hw; - - intel_uncore_forcewake_put__locked(uncore, fw_domains); - spin_unlock_irqrestore(&uncore->lock, flags); - - return mul_u64_u32_div(time_hw, mul, div); -} - -u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, - i915_reg_t reg) -{ - return DIV_ROUND_UP_ULL(intel_rc6_residency_ns(dev_priv, reg), 1000); -} - -u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat) -{ - u32 cagf; - - if (INTEL_GEN(dev_priv) >= 9) - cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; - else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) - cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; - else - cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; - - return cagf; -} diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h index 0f7390c850ec..b579c724b915 100644 --- a/drivers/gpu/drm/i915/intel_pm.h +++ b/drivers/gpu/drm/i915/intel_pm.h @@ -29,19 +29,6 @@ void intel_update_watermarks(struct intel_crtc *crtc); void intel_init_pm(struct drm_i915_private *dev_priv); void intel_init_clock_gating_hooks(struct drm_i915_private *dev_priv); void intel_pm_setup(struct drm_i915_private *dev_priv); -void intel_gpu_ips_init(struct drm_i915_private *dev_priv); -void intel_gpu_ips_teardown(void); -void intel_init_gt_powersave(struct drm_i915_private *dev_priv); -void intel_cleanup_gt_powersave(struct drm_i915_private *dev_priv); -void intel_sanitize_gt_powersave(struct drm_i915_private *dev_priv); -void intel_enable_gt_powersave(struct drm_i915_private *dev_priv); -void intel_disable_gt_powersave(struct drm_i915_private *dev_priv); -bool i915_rc6_ctx_wa_check(struct drm_i915_private *i915); -void i915_rc6_ctx_wa_suspend(struct drm_i915_private *i915); -void i915_rc6_ctx_wa_resume(struct drm_i915_private *i915); -void gen6_rps_busy(struct drm_i915_private *dev_priv); -void gen6_rps_idle(struct drm_i915_private *dev_priv); -void gen6_rps_boost(struct i915_request *rq); void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv); void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv); void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv); @@ -68,26 +55,9 @@ void skl_write_plane_wm(struct intel_plane 
*plane, void skl_write_cursor_wm(struct intel_plane *plane, const struct intel_crtc_state *crtc_state); bool ilk_disable_lp_wm(struct drm_device *dev); -int skl_check_pipe_max_pixel_rate(struct intel_crtc *intel_crtc, - struct intel_crtc_state *cstate); void intel_init_ipc(struct drm_i915_private *dev_priv); void intel_enable_ipc(struct drm_i915_private *dev_priv); -int intel_gpu_freq(struct drm_i915_private *dev_priv, int val); -int intel_freq_opcode(struct drm_i915_private *dev_priv, int val); -u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, i915_reg_t reg); -u64 intel_rc6_residency_us(struct drm_i915_private *dev_priv, i915_reg_t reg); - -u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat1); - -unsigned long i915_chipset_val(struct drm_i915_private *dev_priv); -unsigned long i915_mch_val(struct drm_i915_private *dev_priv); -unsigned long i915_gfx_val(struct drm_i915_private *dev_priv); -void i915_update_gfx_val(struct drm_i915_private *dev_priv); - -bool ironlake_set_drps(struct drm_i915_private *dev_priv, u8 val); -int intel_set_rps(struct drm_i915_private *dev_priv, u8 val); -void intel_rps_mark_interactive(struct drm_i915_private *i915, bool interactive); bool intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable); #endif /* __INTEL_PM_H__ */ diff --git a/drivers/gpu/drm/i915/intel_region_lmem.c b/drivers/gpu/drm/i915/intel_region_lmem.c new file mode 100644 index 000000000000..583118095635 --- /dev/null +++ b/drivers/gpu/drm/i915/intel_region_lmem.c @@ -0,0 +1,132 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "i915_drv.h" +#include "intel_memory_region.h" +#include "gem/i915_gem_lmem.h" +#include "gem/i915_gem_region.h" +#include "intel_region_lmem.h" + +static int init_fake_lmem_bar(struct intel_memory_region *mem) +{ + struct drm_i915_private *i915 = mem->i915; + struct i915_ggtt *ggtt = &i915->ggtt; + unsigned long n; + int ret; + + /* We want to 1:1 map the mappable aperture to our reserved region */ + + mem->fake_mappable.start = 0; + mem->fake_mappable.size = resource_size(&mem->region); + mem->fake_mappable.color = I915_COLOR_UNEVICTABLE; + + ret = drm_mm_reserve_node(&ggtt->vm.mm, &mem->fake_mappable); + if (ret) + return ret; + + mem->remap_addr = dma_map_resource(&i915->drm.pdev->dev, + mem->region.start, + mem->fake_mappable.size, + PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_FORCE_CONTIGUOUS); + if (dma_mapping_error(&i915->drm.pdev->dev, mem->remap_addr)) { + drm_mm_remove_node(&mem->fake_mappable); + return -EINVAL; + } + + for (n = 0; n < mem->fake_mappable.size >> PAGE_SHIFT; ++n) { + ggtt->vm.insert_page(&ggtt->vm, + mem->remap_addr + (n << PAGE_SHIFT), + n << PAGE_SHIFT, + I915_CACHE_NONE, 0); + } + + mem->region = (struct resource)DEFINE_RES_MEM(mem->remap_addr, + mem->fake_mappable.size); + + return 0; +} + +static void release_fake_lmem_bar(struct intel_memory_region *mem) +{ + if (drm_mm_node_allocated(&mem->fake_mappable)) + drm_mm_remove_node(&mem->fake_mappable); + + dma_unmap_resource(&mem->i915->drm.pdev->dev, + mem->remap_addr, + mem->fake_mappable.size, + PCI_DMA_BIDIRECTIONAL, + DMA_ATTR_FORCE_CONTIGUOUS); +} + +static void +region_lmem_release(struct intel_memory_region *mem) +{ + release_fake_lmem_bar(mem); + io_mapping_fini(&mem->iomap); + intel_memory_region_release_buddy(mem); +} + +static int +region_lmem_init(struct intel_memory_region *mem) +{ + int ret; + + if (i915_modparams.fake_lmem_start) { + ret = init_fake_lmem_bar(mem); + GEM_BUG_ON(ret); + } + + if 
(!io_mapping_init_wc(&mem->iomap, + mem->io_start, + resource_size(&mem->region))) + return -EIO; + + ret = intel_memory_region_init_buddy(mem); + if (ret) + io_mapping_fini(&mem->iomap); + + return ret; +} + +const struct intel_memory_region_ops intel_region_lmem_ops = { + .init = region_lmem_init, + .release = region_lmem_release, + .create_object = __i915_gem_lmem_object_create, +}; + +struct intel_memory_region * +intel_setup_fake_lmem(struct drm_i915_private *i915) +{ + struct pci_dev *pdev = i915->drm.pdev; + struct intel_memory_region *mem; + resource_size_t mappable_end; + resource_size_t io_start; + resource_size_t start; + + GEM_BUG_ON(i915_ggtt_has_aperture(&i915->ggtt)); + GEM_BUG_ON(!i915_modparams.fake_lmem_start); + + /* Your mappable aperture belongs to me now! */ + mappable_end = pci_resource_len(pdev, 2); + io_start = pci_resource_start(pdev, 2), + start = i915_modparams.fake_lmem_start; + + mem = intel_memory_region_create(i915, + start, + mappable_end, + PAGE_SIZE, + io_start, + &intel_region_lmem_ops); + if (!IS_ERR(mem)) { + DRM_INFO("Intel graphics fake LMEM: %pR\n", &mem->region); + DRM_INFO("Intel graphics fake LMEM IO start: %llx\n", + (u64)mem->io_start); + DRM_INFO("Intel graphics fake LMEM size: %llx\n", + (u64)resource_size(&mem->region)); + } + + return mem; +} diff --git a/drivers/gpu/drm/i915/intel_region_lmem.h b/drivers/gpu/drm/i915/intel_region_lmem.h new file mode 100644 index 000000000000..213def7c7b8a --- /dev/null +++ b/drivers/gpu/drm/i915/intel_region_lmem.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __INTEL_REGION_LMEM_H +#define __INTEL_REGION_LMEM_H + +struct drm_i915_private; + +extern const struct intel_memory_region_ops intel_region_lmem_ops; + +struct intel_memory_region * +intel_setup_fake_lmem(struct drm_i915_private *i915); + +#endif /* !__INTEL_REGION_LMEM_H */ diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 2fd3c097e1f5..ad719c9602af 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -27,7 +27,6 @@ */ #include <linux/pm_runtime.h> -#include <linux/vgaarb.h> #include <drm/drm_print.h> diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 9e583f13a9e4..94a97bf8c021 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -805,9 +805,6 @@ void assert_forcewakes_active(struct intel_uncore *uncore, /* We give fast paths for the really cool registers */ #define NEEDS_FORCE_WAKE(reg) ((reg) < 0x40000) -#define GEN11_NEEDS_FORCE_WAKE(reg) \ - ((reg) < 0x40000 || ((reg) >= 0x1c0000 && (reg) < 0x1dc000)) - #define __gen6_reg_read_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd; \ @@ -903,12 +900,10 @@ static const struct intel_forcewake_range __vlv_fw_ranges[] = { }) #define __gen11_fwtable_reg_read_fw_domains(uncore, offset) \ -({ \ - enum forcewake_domains __fwd = 0; \ - if (GEN11_NEEDS_FORCE_WAKE((offset))) \ - __fwd = find_fw_domain(uncore, offset); \ - __fwd; \ -}) + find_fw_domain(uncore, offset) + +#define __gen12_fwtable_reg_read_fw_domains(uncore, offset) \ + find_fw_domain(uncore, offset) /* *Must* be sorted by offset! See intel_shadow_table_check(). 
*/ static const i915_reg_t gen8_shadowed_regs[] = { @@ -935,6 +930,20 @@ static const i915_reg_t gen11_shadowed_regs[] = { /* TODO: Other registers are not yet used */ }; +static const i915_reg_t gen12_shadowed_regs[] = { + RING_TAIL(RENDER_RING_BASE), /* 0x2000 (base) */ + GEN6_RPNSWREQ, /* 0xA008 */ + GEN6_RC_VIDEO_FREQ, /* 0xA00C */ + RING_TAIL(BLT_RING_BASE), /* 0x22000 (base) */ + RING_TAIL(GEN11_BSD_RING_BASE), /* 0x1C0000 (base) */ + RING_TAIL(GEN11_BSD2_RING_BASE), /* 0x1C4000 (base) */ + RING_TAIL(GEN11_VEBOX_RING_BASE), /* 0x1C8000 (base) */ + RING_TAIL(GEN11_BSD3_RING_BASE), /* 0x1D0000 (base) */ + RING_TAIL(GEN11_BSD4_RING_BASE), /* 0x1D4000 (base) */ + RING_TAIL(GEN11_VEBOX2_RING_BASE), /* 0x1D8000 (base) */ + /* TODO: Other registers are not yet used */ +}; + static int mmio_reg_cmp(u32 key, const i915_reg_t *reg) { u32 offset = i915_mmio_reg_offset(*reg); @@ -957,6 +966,7 @@ static bool is_gen##x##_shadowed(u32 offset) \ __is_genX_shadowed(8) __is_genX_shadowed(11) +__is_genX_shadowed(12) static enum forcewake_domains gen6_reg_write_fw_domains(struct intel_uncore *uncore, i915_reg_t reg) @@ -1005,8 +1015,18 @@ static const struct intel_forcewake_range __chv_fw_ranges[] = { #define __gen11_fwtable_reg_write_fw_domains(uncore, offset) \ ({ \ enum forcewake_domains __fwd = 0; \ - if (GEN11_NEEDS_FORCE_WAKE((offset)) && !is_gen11_shadowed(offset)) \ - __fwd = find_fw_domain(uncore, offset); \ + const u32 __offset = (offset); \ + if (!is_gen11_shadowed(__offset)) \ + __fwd = find_fw_domain(uncore, __offset); \ + __fwd; \ +}) + +#define __gen12_fwtable_reg_write_fw_domains(uncore, offset) \ +({ \ + enum forcewake_domains __fwd = 0; \ + const u32 __offset = (offset); \ + if (!is_gen12_shadowed(__offset)) \ + __fwd = find_fw_domain(uncore, __offset); \ __fwd; \ }) @@ -1065,9 +1085,51 @@ static const struct intel_forcewake_range __gen11_fw_ranges[] = { GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL), GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), + GEN_FW_RANGE(0xb480, 0xdeff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xdf00, 0xe8ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0xe900, 0x16dff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x16e00, 0x19fff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x1a000, 0x243ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x40000, 0x1bffff, 0), + GEN_FW_RANGE(0x1c0000, 0x1c3fff, FORCEWAKE_MEDIA_VDBOX0), + GEN_FW_RANGE(0x1c4000, 0x1c7fff, FORCEWAKE_MEDIA_VDBOX1), + GEN_FW_RANGE(0x1c8000, 0x1cbfff, FORCEWAKE_MEDIA_VEBOX0), + GEN_FW_RANGE(0x1cc000, 0x1cffff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x1d0000, 0x1d3fff, FORCEWAKE_MEDIA_VDBOX2), + GEN_FW_RANGE(0x1d4000, 0x1d7fff, FORCEWAKE_MEDIA_VDBOX3), + GEN_FW_RANGE(0x1d8000, 0x1dbfff, FORCEWAKE_MEDIA_VEBOX1) +}; + +/* *Must* be sorted by offset ranges! See intel_fw_table_check(). 
*/ +static const struct intel_forcewake_range __gen12_fw_ranges[] = { + GEN_FW_RANGE(0x0, 0xaff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xb00, 0x1fff, 0), /* uncore range */ + GEN_FW_RANGE(0x2000, 0x26ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x2700, 0x2fff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x3000, 0x3fff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x4000, 0x51ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x5200, 0x7fff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x8000, 0x813f, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8140, 0x815f, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x8160, 0x82ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8300, 0x84ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x8500, 0x8bff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x8c00, 0x8cff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x8d00, 0x93ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x9400, 0x97ff, FORCEWAKE_ALL), + GEN_FW_RANGE(0x9800, 0xafff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xb000, 0xb47f, FORCEWAKE_RENDER), GEN_FW_RANGE(0xb480, 0xdfff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0xe000, 0xe8ff, FORCEWAKE_RENDER), - GEN_FW_RANGE(0xe900, 0x243ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0xe900, 0x147ff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x14800, 0x148ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x14900, 0x19fff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x1a000, 0x1a7ff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x1a800, 0x1afff, FORCEWAKE_BLITTER), + GEN_FW_RANGE(0x1b000, 0x1bfff, FORCEWAKE_RENDER), + GEN_FW_RANGE(0x1c000, 0x243ff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0x24400, 0x247ff, FORCEWAKE_RENDER), GEN_FW_RANGE(0x24800, 0x3ffff, FORCEWAKE_BLITTER), GEN_FW_RANGE(0x40000, 0x1bffff, 0), @@ -1228,6 +1290,7 @@ __gen_read(func, 16) \ __gen_read(func, 32) \ __gen_read(func, 64) +__gen_reg_read_funcs(gen12_fwtable); __gen_reg_read_funcs(gen11_fwtable); __gen_reg_read_funcs(fwtable); __gen_reg_read_funcs(gen6); @@ -1319,6 +1382,7 @@ __gen_write(func, 8) \ __gen_write(func, 16) \ __gen_write(func, 32) +__gen_reg_write_funcs(gen12_fwtable); __gen_reg_write_funcs(gen11_fwtable); __gen_reg_write_funcs(fwtable); __gen_reg_write_funcs(gen8); @@ -1690,10 +1754,14 @@ static int uncore_forcewake_init(struct intel_uncore *uncore) ASSIGN_FW_DOMAINS_TABLE(uncore, __gen9_fw_ranges); ASSIGN_WRITE_MMIO_VFUNCS(uncore, fwtable); ASSIGN_READ_MMIO_VFUNCS(uncore, fwtable); - } else { + } else if (IS_GEN(i915, 11)) { ASSIGN_FW_DOMAINS_TABLE(uncore, __gen11_fw_ranges); ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen11_fwtable); ASSIGN_READ_MMIO_VFUNCS(uncore, gen11_fwtable); + } else { + ASSIGN_FW_DOMAINS_TABLE(uncore, __gen12_fw_ranges); + ASSIGN_WRITE_MMIO_VFUNCS(uncore, gen12_fwtable); + ASSIGN_READ_MMIO_VFUNCS(uncore, gen12_fwtable); } uncore->pmic_bus_access_nb.notifier_call = i915_pmic_bus_access_notifier; diff --git a/drivers/gpu/drm/i915/intel_uncore.h b/drivers/gpu/drm/i915/intel_uncore.h index 414fc2cb0459..dcfa243892c6 100644 --- a/drivers/gpu/drm/i915/intel_uncore.h +++ b/drivers/gpu/drm/i915/intel_uncore.h @@ -378,23 +378,23 @@ intel_uncore_read64_2x32(struct intel_uncore *uncore, static inline void intel_uncore_rmw(struct intel_uncore *uncore, i915_reg_t reg, u32 clear, u32 set) { - u32 val; + u32 old, val; - val = intel_uncore_read(uncore, reg); - val &= ~clear; - val |= set; - intel_uncore_write(uncore, reg, val); + old = intel_uncore_read(uncore, reg); + val = (old & ~clear) | set; + if (val != old) + intel_uncore_write(uncore, reg, val); } static inline void intel_uncore_rmw_fw(struct intel_uncore *uncore, i915_reg_t reg, u32 clear, u32 set) { - u32 val; + u32 old, val; - val = intel_uncore_read_fw(uncore, reg); - val &= ~clear; - val |= set; - 
intel_uncore_write_fw(uncore, reg, val); + old = intel_uncore_read_fw(uncore, reg); + val = (old & ~clear) | set; + if (val != old) + intel_uncore_write_fw(uncore, reg, val); } static inline int intel_uncore_write_and_verify(struct intel_uncore *uncore, diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.c b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c new file mode 100644 index 000000000000..a29d93707345 --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_tgl.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#include <linux/sysfs.h> + +#include "i915_drv.h" +#include "i915_oa_tgl.h" + +static const struct i915_oa_reg b_counter_config_test_oa[] = { + { _MMIO(0xD920), 0x00000000 }, + { _MMIO(0xD900), 0x00000000 }, + { _MMIO(0xD904), 0xF0800000 }, + { _MMIO(0xD910), 0x00000000 }, + { _MMIO(0xD914), 0xF0800000 }, + { _MMIO(0xDC40), 0x00FF0000 }, + { _MMIO(0xD940), 0x00000004 }, + { _MMIO(0xD944), 0x0000FFFF }, + { _MMIO(0xDC00), 0x00000004 }, + { _MMIO(0xDC04), 0x0000FFFF }, + { _MMIO(0xD948), 0x00000003 }, + { _MMIO(0xD94C), 0x0000FFFF }, + { _MMIO(0xDC08), 0x00000003 }, + { _MMIO(0xDC0C), 0x0000FFFF }, + { _MMIO(0xD950), 0x00000007 }, + { _MMIO(0xD954), 0x0000FFFF }, + { _MMIO(0xDC10), 0x00000007 }, + { _MMIO(0xDC14), 0x0000FFFF }, + { _MMIO(0xD958), 0x00100002 }, + { _MMIO(0xD95C), 0x0000FFF7 }, + { _MMIO(0xDC18), 0x00100002 }, + { _MMIO(0xDC1C), 0x0000FFF7 }, + { _MMIO(0xD960), 0x00100002 }, + { _MMIO(0xD964), 0x0000FFCF }, + { _MMIO(0xDC20), 0x00100002 }, + { _MMIO(0xDC24), 0x0000FFCF }, + { _MMIO(0xD968), 0x00100082 }, + { _MMIO(0xD96C), 0x0000FFEF }, + { _MMIO(0xDC28), 0x00100082 }, + { _MMIO(0xDC2C), 0x0000FFEF }, + { _MMIO(0xD970), 0x001000C2 }, + { _MMIO(0xD974), 0x0000FFE7 }, + { _MMIO(0xDC30), 0x001000C2 }, + { _MMIO(0xDC34), 0x0000FFE7 }, + { _MMIO(0xD978), 0x00100001 }, + { _MMIO(0xD97C), 0x0000FFE7 }, + { _MMIO(0xDC38), 0x00100001 }, + { _MMIO(0xDC3C), 0x0000FFE7 }, +}; + +static const struct i915_oa_reg flex_eu_config_test_oa[] = { +}; + +static const struct i915_oa_reg mux_config_test_oa[] = { + { _MMIO(0x0D04), 0x00000200 }, + { _MMIO(0x9840), 0x00000000 }, + { _MMIO(0x9884), 0x00000000 }, + { _MMIO(0x9888), 0x280E0000 }, + { _MMIO(0x9888), 0x1E0E0147 }, + { _MMIO(0x9888), 0x180E0000 }, + { _MMIO(0x9888), 0x160E0000 }, + { _MMIO(0x9888), 0x1E0F1000 }, + { _MMIO(0x9888), 0x1E104000 }, + { _MMIO(0x9888), 0x2E020100 }, + { _MMIO(0x9888), 0x2C030004 }, + { _MMIO(0x9888), 0x38003000 }, + { _MMIO(0x9888), 0x1E0A8000 }, + { _MMIO(0x9884), 0x00000003 }, + { _MMIO(0x9888), 0x49110000 }, + { _MMIO(0x9888), 0x5D101400 }, + { _MMIO(0x9888), 0x1D140020 }, + { _MMIO(0x9888), 0x1D1103A3 }, + { _MMIO(0x9888), 0x01110000 }, + { _MMIO(0x9888), 0x61111000 }, + { _MMIO(0x9888), 0x1F128000 }, + { _MMIO(0x9888), 0x17100000 }, + { _MMIO(0x9888), 0x55100630 }, + { _MMIO(0x9888), 0x57100000 }, + { _MMIO(0x9888), 0x31100000 }, + { _MMIO(0x9884), 0x00000003 }, + { _MMIO(0x9888), 0x65100002 }, + { _MMIO(0x9884), 0x00000000 }, + { _MMIO(0x9888), 0x42000001 }, +}; + +static ssize_t +show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf) +{ + return sprintf(buf, "1\n"); +} + +void +i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv) +{ + strlcpy(dev_priv->perf.test_config.uuid, + "80a833f0-2504-4321-8894-e9277844ce7b", + sizeof(dev_priv->perf.test_config.uuid)); + dev_priv->perf.test_config.id = 1; + + 
dev_priv->perf.test_config.mux_regs = mux_config_test_oa; + dev_priv->perf.test_config.mux_regs_len = ARRAY_SIZE(mux_config_test_oa); + + dev_priv->perf.test_config.b_counter_regs = b_counter_config_test_oa; + dev_priv->perf.test_config.b_counter_regs_len = ARRAY_SIZE(b_counter_config_test_oa); + + dev_priv->perf.test_config.flex_regs = flex_eu_config_test_oa; + dev_priv->perf.test_config.flex_regs_len = ARRAY_SIZE(flex_eu_config_test_oa); + + dev_priv->perf.test_config.sysfs_metric.name = "80a833f0-2504-4321-8894-e9277844ce7b"; + dev_priv->perf.test_config.sysfs_metric.attrs = dev_priv->perf.test_config.attrs; + + dev_priv->perf.test_config.attrs[0] = &dev_priv->perf.test_config.sysfs_metric_id.attr; + + dev_priv->perf.test_config.sysfs_metric_id.attr.name = "id"; + dev_priv->perf.test_config.sysfs_metric_id.attr.mode = 0444; + dev_priv->perf.test_config.sysfs_metric_id.show = show_test_oa_id; +} diff --git a/drivers/gpu/drm/i915/oa/i915_oa_tgl.h b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h new file mode 100644 index 000000000000..4c25f0be825c --- /dev/null +++ b/drivers/gpu/drm/i915/oa/i915_oa_tgl.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2018 Intel Corporation + * + * Autogenerated file by GPU Top : https://github.com/rib/gputop + * DO NOT EDIT manually! + */ + +#ifndef __I915_OA_TGL_H__ +#define __I915_OA_TGL_H__ + +struct drm_i915_private; + +void i915_perf_load_test_config_tgl(struct drm_i915_private *dev_priv); + +#endif diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index 77d844ac8b71..260b0ee5d1e3 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -68,7 +68,7 @@ static struct live_active *__live_alloc(struct drm_i915_private *i915) return NULL; kref_init(&active->ref); - i915_active_init(i915, &active->base, __live_active, __live_retire); + i915_active_init(&active->base, __live_active, __live_retire); return active; } @@ -79,7 +79,6 @@ __live_active_setup(struct drm_i915_private *i915) struct intel_engine_cs *engine; struct i915_sw_fence *submit; struct live_active *active; - enum intel_engine_id id; unsigned int count = 0; int err = 0; @@ -97,7 +96,7 @@ __live_active_setup(struct drm_i915_private *i915) if (err) goto out; - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { struct i915_request *rq; rq = i915_request_create(engine->kernel_context); @@ -110,7 +109,7 @@ __live_active_setup(struct drm_i915_private *i915) submit, GFP_KERNEL); if (err >= 0) - err = i915_active_ref(&active->base, rq->timeline, rq); + err = i915_active_add_request(&active->base, rq); i915_request_add(rq); if (err) { pr_err("Failed to track active ref!\n"); @@ -121,7 +120,7 @@ __live_active_setup(struct drm_i915_private *i915) } i915_active_release(&active->base); - if (active->retired && count) { + if (READ_ONCE(active->retired) && count) { pr_err("i915_active retired before submission!\n"); err = -EINVAL; } @@ -146,35 +145,25 @@ static int live_active_wait(void *arg) { struct drm_i915_private *i915 = arg; struct live_active *active; - intel_wakeref_t wakeref; int err = 0; /* Check that we get a callback when requests retire upon waiting */ - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - active = __live_active_setup(i915); - if (IS_ERR(active)) { - err = PTR_ERR(active); - goto err; - } + if (IS_ERR(active)) + return PTR_ERR(active); i915_active_wait(&active->base); - if (!active->retired) 
{ + if (!READ_ONCE(active->retired)) { pr_err("i915_active not retired after waiting!\n"); err = -EINVAL; } __live_put(active); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(i915)) err = -EIO; -err: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; } @@ -182,35 +171,25 @@ static int live_active_retire(void *arg) { struct drm_i915_private *i915 = arg; struct live_active *active; - intel_wakeref_t wakeref; int err = 0; /* Check that we get a callback when requests are indirectly retired */ - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - active = __live_active_setup(i915); - if (IS_ERR(active)) { - err = PTR_ERR(active); - goto err; - } + if (IS_ERR(active)) + return PTR_ERR(active); /* waits for & retires all requests */ - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(i915)) err = -EIO; - if (!active->retired) { + if (!READ_ONCE(active->retired)) { pr_err("i915_active not retired after flushing!\n"); err = -EINVAL; } __live_put(active); -err: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); - return err; } @@ -226,3 +205,48 @@ int i915_active_live_selftests(struct drm_i915_private *i915) return i915_subtests(tests, i915); } + +static struct intel_engine_cs *node_to_barrier(struct active_node *it) +{ + struct intel_engine_cs *engine; + + if (!is_barrier(&it->base)) + return NULL; + + engine = __barrier_to_engine(it); + smp_rmb(); /* serialise with add_active_barriers */ + if (!is_barrier(&it->base)) + return NULL; + + return engine; +} + +void i915_active_print(struct i915_active *ref, struct drm_printer *m) +{ + drm_printf(m, "active %pS:%pS\n", ref->active, ref->retire); + drm_printf(m, "\tcount: %d\n", atomic_read(&ref->count)); + drm_printf(m, "\tpreallocated barriers? 
%s\n", + yesno(!llist_empty(&ref->preallocated_barriers))); + + if (i915_active_acquire_if_busy(ref)) { + struct active_node *it, *n; + + rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) { + struct intel_engine_cs *engine; + + engine = node_to_barrier(it); + if (engine) { + drm_printf(m, "\tbarrier: %s\n", engine->name); + continue; + } + + if (i915_active_fence_isset(&it->base)) { + drm_printf(m, + "\ttimeline: %llx\n", it->timeline); + continue; + } + } + + i915_active_release(ref); + } +} diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c index 23f784eae1e7..1b856bae67b5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_buddy.c +++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c @@ -375,6 +375,8 @@ retry: if (err) break; + + cond_resched(); } if (err == -ENOMEM) @@ -687,6 +689,8 @@ static int igt_buddy_alloc_range(void *arg) rem -= size; if (!rem) break; + + cond_resched(); } if (err == -ENOMEM) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 37593831b539..d83f6bf6d9d4 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -15,23 +15,26 @@ #include "igt_flush_test.h" #include "mock_drm.h" -static int switch_to_context(struct drm_i915_private *i915, - struct i915_gem_context *ctx) +static int switch_to_context(struct i915_gem_context *ctx) { - struct intel_engine_cs *engine; - enum intel_engine_id id; + struct i915_gem_engines_iter it; + struct intel_context *ce; + int err = 0; - for_each_engine(engine, i915, id) { + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { struct i915_request *rq; - rq = igt_request_alloc(ctx, engine); - if (IS_ERR(rq)) - return PTR_ERR(rq); + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + break; + } i915_request_add(rq); } + i915_gem_context_unlock_engines(ctx); - return 0; + return err; } static void trash_stolen(struct drm_i915_private *i915) @@ -42,6 +45,10 @@ static void trash_stolen(struct drm_i915_private *i915) unsigned long page; u32 prng = 0x12345678; + /* XXX: fsck. needs some more thought... 
*/ + if (!i915_ggtt_has_aperture(ggtt)) + return; + for (page = 0; page < size; page += PAGE_SIZE) { const dma_addr_t dma = i915->dsm.start + page; u32 __iomem *s; @@ -117,12 +124,9 @@ static void pm_resume(struct drm_i915_private *i915) */ with_intel_runtime_pm(&i915->runtime_pm, wakeref) { intel_gt_sanitize(&i915->gt, false); - i915_gem_sanitize(i915); - mutex_lock(&i915->drm.struct_mutex); i915_gem_restore_gtt_mappings(i915); - i915_gem_restore_fences(i915); - mutex_unlock(&i915->drm.struct_mutex); + i915_gem_restore_fences(&i915->ggtt); i915_gem_resume(i915); } @@ -140,11 +144,9 @@ static int igt_gem_suspend(void *arg) return PTR_ERR(file); err = -ENOMEM; - mutex_lock(&i915->drm.struct_mutex); ctx = live_context(i915, file); if (!IS_ERR(ctx)) - err = switch_to_context(i915, ctx); - mutex_unlock(&i915->drm.struct_mutex); + err = switch_to_context(ctx); if (err) goto out; @@ -159,9 +161,7 @@ static int igt_gem_suspend(void *arg) pm_resume(i915); - mutex_lock(&i915->drm.struct_mutex); - err = switch_to_context(i915, ctx); - mutex_unlock(&i915->drm.struct_mutex); + err = switch_to_context(ctx); out: mock_file_free(i915, file); return err; @@ -179,11 +179,9 @@ static int igt_gem_hibernate(void *arg) return PTR_ERR(file); err = -ENOMEM; - mutex_lock(&i915->drm.struct_mutex); ctx = live_context(i915, file); if (!IS_ERR(ctx)) - err = switch_to_context(i915, ctx); - mutex_unlock(&i915->drm.struct_mutex); + err = switch_to_context(ctx); if (err) goto out; @@ -198,9 +196,7 @@ static int igt_gem_hibernate(void *arg) pm_resume(i915); - mutex_lock(&i915->drm.struct_mutex); - err = switch_to_context(i915, ctx); - mutex_unlock(&i915->drm.struct_mutex); + err = switch_to_context(ctx); out: mock_file_free(i915, file); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index cb30c669b1b7..42e948144f1b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -43,8 +43,7 @@ static void quirk_add(struct drm_i915_gem_object *obj, list_add(&obj->st_link, objects); } -static int populate_ggtt(struct drm_i915_private *i915, - struct list_head *objects) +static int populate_ggtt(struct i915_ggtt *ggtt, struct list_head *objects) { unsigned long unbound, bound, count; struct drm_i915_gem_object *obj; @@ -53,7 +52,8 @@ static int populate_ggtt(struct drm_i915_private *i915, do { struct i915_vma *vma; - obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(ggtt->vm.i915, + I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); @@ -70,7 +70,7 @@ static int populate_ggtt(struct drm_i915_private *i915, count++; } while (1); pr_debug("Filled GGTT with %lu pages [%llu total]\n", - count, i915->ggtt.vm.total / PAGE_SIZE); + count, ggtt->vm.total / PAGE_SIZE); bound = 0; unbound = 0; @@ -96,7 +96,7 @@ static int populate_ggtt(struct drm_i915_private *i915, return -EINVAL; } - if (list_empty(&i915->ggtt.vm.bound_list)) { + if (list_empty(&ggtt->vm.bound_list)) { pr_err("No objects on the GGTT inactive list!\n"); return -EINVAL; } @@ -104,20 +104,16 @@ static int populate_ggtt(struct drm_i915_private *i915, return 0; } -static void unpin_ggtt(struct drm_i915_private *i915) +static void unpin_ggtt(struct i915_ggtt *ggtt) { - struct i915_ggtt *ggtt = &i915->ggtt; struct i915_vma *vma; - mutex_lock(&ggtt->vm.mutex); - list_for_each_entry(vma, &i915->ggtt.vm.bound_list, vm_link) + list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link) if 
(vma->obj->mm.quirked) i915_vma_unpin(vma); - mutex_unlock(&ggtt->vm.mutex); } -static void cleanup_objects(struct drm_i915_private *i915, - struct list_head *list) +static void cleanup_objects(struct i915_ggtt *ggtt, struct list_head *list) { struct drm_i915_gem_object *obj, *on; @@ -127,44 +123,44 @@ static void cleanup_objects(struct drm_i915_private *i915, i915_gem_object_put(obj); } - mutex_unlock(&i915->drm.struct_mutex); - - i915_gem_drain_freed_objects(i915); - - mutex_lock(&i915->drm.struct_mutex); + i915_gem_drain_freed_objects(ggtt->vm.i915); } static int igt_evict_something(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_ggtt *ggtt = &i915->ggtt; + struct intel_gt *gt = arg; + struct i915_ggtt *ggtt = gt->ggtt; LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict one. */ - err = populate_ggtt(i915, &objects); + err = populate_ggtt(ggtt, &objects); if (err) goto cleanup; /* Everything is pinned, nothing should happen */ + mutex_lock(&ggtt->vm.mutex); err = i915_gem_evict_something(&ggtt->vm, I915_GTT_PAGE_SIZE, 0, 0, 0, U64_MAX, 0); + mutex_unlock(&ggtt->vm.mutex); if (err != -ENOSPC) { pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n", err); goto cleanup; } - unpin_ggtt(i915); + unpin_ggtt(ggtt); /* Everything is unpinned, we should be able to evict something */ + mutex_lock(&ggtt->vm.mutex); err = i915_gem_evict_something(&ggtt->vm, I915_GTT_PAGE_SIZE, 0, 0, 0, U64_MAX, 0); + mutex_unlock(&ggtt->vm.mutex); if (err) { pr_err("i915_gem_evict_something failed on a full GGTT with err=%d\n", err); @@ -172,13 +168,14 @@ static int igt_evict_something(void *arg) } cleanup: - cleanup_objects(i915, &objects); + cleanup_objects(ggtt, &objects); return err; } static int igt_overcommit(void *arg) { - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; + struct i915_ggtt *ggtt = gt->ggtt; struct drm_i915_gem_object *obj; struct i915_vma *vma; LIST_HEAD(objects); @@ -188,11 +185,11 @@ static int igt_overcommit(void *arg) * We expect it to fail. */ - err = populate_ggtt(i915, &objects); + err = populate_ggtt(ggtt, &objects); if (err) goto cleanup; - obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto cleanup; @@ -208,14 +205,14 @@ static int igt_overcommit(void *arg) } cleanup: - cleanup_objects(i915, &objects); + cleanup_objects(ggtt, &objects); return err; } static int igt_evict_for_vma(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_ggtt *ggtt = &i915->ggtt; + struct intel_gt *gt = arg; + struct i915_ggtt *ggtt = gt->ggtt; struct drm_mm_node target = { .start = 0, .size = 4096, @@ -225,22 +222,26 @@ static int igt_evict_for_vma(void *arg) /* Fill the GGTT with pinned objects and try to evict a range. 
*/ - err = populate_ggtt(i915, &objects); + err = populate_ggtt(ggtt, &objects); if (err) goto cleanup; /* Everything is pinned, nothing should happen */ + mutex_lock(&ggtt->vm.mutex); err = i915_gem_evict_for_node(&ggtt->vm, &target, 0); + mutex_unlock(&ggtt->vm.mutex); if (err != -ENOSPC) { pr_err("i915_gem_evict_for_node on a full GGTT returned err=%d\n", err); goto cleanup; } - unpin_ggtt(i915); + unpin_ggtt(ggtt); /* Everything is unpinned, we should be able to evict the node */ + mutex_lock(&ggtt->vm.mutex); err = i915_gem_evict_for_node(&ggtt->vm, &target, 0); + mutex_unlock(&ggtt->vm.mutex); if (err) { pr_err("i915_gem_evict_for_node returned err=%d\n", err); @@ -248,7 +249,7 @@ static int igt_evict_for_vma(void *arg) } cleanup: - cleanup_objects(i915, &objects); + cleanup_objects(ggtt, &objects); return err; } @@ -261,8 +262,8 @@ static void mock_color_adjust(const struct drm_mm_node *node, static int igt_evict_for_cache_color(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_ggtt *ggtt = &i915->ggtt; + struct intel_gt *gt = arg; + struct i915_ggtt *ggtt = gt->ggtt; const unsigned long flags = PIN_OFFSET_FIXED; struct drm_mm_node target = { .start = I915_GTT_PAGE_SIZE * 2, @@ -274,14 +275,16 @@ static int igt_evict_for_cache_color(void *arg) LIST_HEAD(objects); int err; - /* Currently the use of color_adjust is limited to cache domains within - * the ggtt, and so the presence of mm.color_adjust is assumed to be - * i915_gtt_color_adjust throughout our driver, so using a mock color - * adjust will work just fine for our purposes. + /* + * Currently the use of color_adjust for the GGTT is limited to cache + * coloring and guard pages, and so the presence of mm.color_adjust for + * the GGTT is assumed to be i915_ggtt_color_adjust, hence using a mock + * color adjust will work just fine for our purposes. 
*/ ggtt->vm.mm.color_adjust = mock_color_adjust; + GEM_BUG_ON(!i915_vm_has_cache_coloring(&ggtt->vm)); - obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto cleanup; @@ -297,7 +300,7 @@ static int igt_evict_for_cache_color(void *arg) goto cleanup; } - obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE); + obj = i915_gem_object_create_internal(gt->i915, I915_GTT_PAGE_SIZE); if (IS_ERR(obj)) { err = PTR_ERR(obj); goto cleanup; @@ -317,7 +320,9 @@ static int igt_evict_for_cache_color(void *arg) i915_vma_unpin(vma); /* Remove just the second vma */ + mutex_lock(&ggtt->vm.mutex); err = i915_gem_evict_for_node(&ggtt->vm, &target, 0); + mutex_unlock(&ggtt->vm.mutex); if (err) { pr_err("[0]i915_gem_evict_for_node returned err=%d\n", err); goto cleanup; @@ -328,7 +333,9 @@ static int igt_evict_for_cache_color(void *arg) */ target.color = I915_CACHE_L3_LLC; + mutex_lock(&ggtt->vm.mutex); err = i915_gem_evict_for_node(&ggtt->vm, &target, 0); + mutex_unlock(&ggtt->vm.mutex); if (!err) { pr_err("[1]i915_gem_evict_for_node returned err=%d\n", err); err = -EINVAL; @@ -338,36 +345,40 @@ static int igt_evict_for_cache_color(void *arg) err = 0; cleanup: - unpin_ggtt(i915); - cleanup_objects(i915, &objects); + unpin_ggtt(ggtt); + cleanup_objects(ggtt, &objects); ggtt->vm.mm.color_adjust = NULL; return err; } static int igt_evict_vm(void *arg) { - struct drm_i915_private *i915 = arg; - struct i915_ggtt *ggtt = &i915->ggtt; + struct intel_gt *gt = arg; + struct i915_ggtt *ggtt = gt->ggtt; LIST_HEAD(objects); int err; /* Fill the GGTT with pinned objects and try to evict everything. */ - err = populate_ggtt(i915, &objects); + err = populate_ggtt(ggtt, &objects); if (err) goto cleanup; /* Everything is pinned, nothing should happen */ + mutex_lock(&ggtt->vm.mutex); err = i915_gem_evict_vm(&ggtt->vm); + mutex_unlock(&ggtt->vm.mutex); if (err) { pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n", err); goto cleanup; } - unpin_ggtt(i915); + unpin_ggtt(ggtt); + mutex_lock(&ggtt->vm.mutex); err = i915_gem_evict_vm(&ggtt->vm); + mutex_unlock(&ggtt->vm.mutex); if (err) { pr_err("i915_gem_evict_vm on a full GGTT returned err=%d]\n", err); @@ -375,14 +386,16 @@ static int igt_evict_vm(void *arg) } cleanup: - cleanup_objects(i915, &objects); + cleanup_objects(ggtt, &objects); return err; } static int igt_evict_contexts(void *arg) { const u64 PRETEND_GGTT_SIZE = 16ull << 20; - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; + struct i915_ggtt *ggtt = gt->ggtt; + struct drm_i915_private *i915 = gt->i915; struct intel_engine_cs *engine; enum intel_engine_id id; struct reserved { @@ -408,14 +421,14 @@ static int igt_evict_contexts(void *arg) if (!HAS_FULL_PPGTT(i915)) return 0; - mutex_lock(&i915->drm.struct_mutex); wakeref = intel_runtime_pm_get(&i915->runtime_pm); /* Reserve a block so that we know we have enough to fit a few rq */ memset(&hole, 0, sizeof(hole)); - err = i915_gem_gtt_insert(&i915->ggtt.vm, &hole, + mutex_lock(&ggtt->vm.mutex); + err = i915_gem_gtt_insert(&ggtt->vm, &hole, PRETEND_GGTT_SIZE, 0, I915_COLOR_UNEVICTABLE, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, PIN_NOEVICT); if (err) goto out_locked; @@ -425,15 +438,17 @@ static int igt_evict_contexts(void *arg) do { struct reserved *r; + mutex_unlock(&ggtt->vm.mutex); r = kcalloc(1, sizeof(*r), GFP_KERNEL); + mutex_lock(&ggtt->vm.mutex); if (!r) { err = -ENOMEM; goto out_locked; } - if 
(i915_gem_gtt_insert(&i915->ggtt.vm, &r->node, + if (i915_gem_gtt_insert(&ggtt->vm, &r->node, 1ul << 20, 0, I915_COLOR_UNEVICTABLE, - 0, i915->ggtt.vm.total, + 0, ggtt->vm.total, PIN_NOEVICT)) { kfree(r); break; @@ -445,11 +460,11 @@ static int igt_evict_contexts(void *arg) count++; } while (1); drm_mm_remove_node(&hole); - mutex_unlock(&i915->drm.struct_mutex); + mutex_unlock(&ggtt->vm.mutex); pr_info("Filled GGTT with %lu 1MiB nodes\n", count); /* Overfill the GGTT with context objects and so try to evict one. */ - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { struct i915_sw_fence fence; struct drm_file *file; @@ -460,7 +475,6 @@ static int igt_evict_contexts(void *arg) } count = 0; - mutex_lock(&i915->drm.struct_mutex); onstack_fence_init(&fence); do { struct i915_request *rq; @@ -478,8 +492,8 @@ static int igt_evict_contexts(void *arg) if (IS_ERR(rq)) { /* When full, fail_if_busy will trigger EBUSY */ if (PTR_ERR(rq) != -EBUSY) { - pr_err("Unexpected error from request alloc (ctx hw id %u, on %s): %d\n", - ctx->hw_id, engine->name, + pr_err("Unexpected error from request alloc (on %s): %d\n", + engine->name, (int)PTR_ERR(rq)); err = PTR_ERR(rq); } @@ -497,8 +511,6 @@ static int igt_evict_contexts(void *arg) count++; err = 0; } while(1); - mutex_unlock(&i915->drm.struct_mutex); - onstack_fence_fini(&fence); pr_info("Submitted %lu contexts/requests on %s\n", count, engine->name); @@ -508,9 +520,9 @@ static int igt_evict_contexts(void *arg) break; } - mutex_lock(&i915->drm.struct_mutex); + mutex_lock(&ggtt->vm.mutex); out_locked: - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(i915)) err = -EIO; while (reserved) { struct reserved *next = reserved->next; @@ -522,8 +534,8 @@ out_locked: } if (drm_mm_node_allocated(&hole)) drm_mm_remove_node(&hole); + mutex_unlock(&ggtt->vm.mutex); intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -545,11 +557,8 @@ int i915_gem_evict_mock_selftests(void) if (!i915) return -ENOMEM; - mutex_lock(&i915->drm.struct_mutex); with_intel_runtime_pm(&i915->runtime_pm, wakeref) - err = i915_subtests(tests, i915); - - mutex_unlock(&i915->drm.struct_mutex); + err = i915_subtests(tests, &i915->gt); drm_dev_put(&i915->drm); return err; @@ -564,5 +573,5 @@ int i915_gem_evict_live_selftests(struct drm_i915_private *i915) if (intel_gt_is_wedged(&i915->gt)) return 0; - return i915_subtests(tests, i915); + return intel_gt_live_subtests(tests, &i915->gt); } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 31a51ca1ddcb..3f7e80fb3bbd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -25,26 +25,20 @@ #include <linux/list_sort.h> #include <linux/prime_numbers.h> +#include "gem/i915_gem_context.h" #include "gem/selftests/mock_context.h" +#include "gt/intel_context.h" #include "i915_random.h" #include "i915_selftest.h" #include "mock_drm.h" #include "mock_gem_device.h" +#include "igt_flush_test.h" static void cleanup_freed_objects(struct drm_i915_private *i915) { - /* - * As we may hold onto the struct_mutex for inordinate lengths of - * time, the NMI khungtaskd detector may fire for the free objects - * worker. 
- */ - mutex_unlock(&i915->drm.struct_mutex); - i915_gem_drain_freed_objects(i915); - - mutex_lock(&i915->drm.struct_mutex); } static void fake_free_pages(struct drm_i915_gem_object *obj, @@ -88,8 +82,6 @@ static int fake_get_pages(struct drm_i915_gem_object *obj) } GEM_BUG_ON(rem); - obj->mm.madv = I915_MADV_DONTNEED; - __i915_gem_object_set_pages(obj, pages, sg_page_sizes); return 0; @@ -101,7 +93,6 @@ static void fake_put_pages(struct drm_i915_gem_object *obj, { fake_free_pages(obj, pages); obj->mm.dirty = false; - obj->mm.madv = I915_MADV_WILLNEED; } static const struct drm_i915_gem_object_ops fake_ops = { @@ -113,6 +104,7 @@ static const struct drm_i915_gem_object_ops fake_ops = { static struct drm_i915_gem_object * fake_dma_object(struct drm_i915_private *i915, u64 size) { + static struct lock_class_key lock_class; struct drm_i915_gem_object *obj; GEM_BUG_ON(!size); @@ -126,7 +118,9 @@ fake_dma_object(struct drm_i915_private *i915, u64 size) goto err; drm_gem_private_object_init(&i915->drm, &obj->base, size); - i915_gem_object_init(obj, &fake_ops); + i915_gem_object_init(obj, &fake_ops, &lock_class); + + i915_gem_object_set_volatile(obj); obj->write_domain = I915_GEM_DOMAIN_CPU; obj->read_domains = I915_GEM_DOMAIN_CPU; @@ -293,18 +287,20 @@ static int lowlevel_hole(struct drm_i915_private *i915, mock_vma.node.size = BIT_ULL(size); mock_vma.node.start = addr; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0); - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + vm->insert_entries(vm, &mock_vma, + I915_CACHE_NONE, 0); } count = n; i915_random_reorder(order, count, &prng); for (n = 0; n < count; n++) { u64 addr = hole_start + order[n] * BIT_ULL(size); + intel_wakeref_t wakeref; GEM_BUG_ON(addr + BIT_ULL(size) > vm->total); - vm->clear_range(vm, addr, BIT_ULL(size)); + with_intel_runtime_pm(&i915->runtime_pm, wakeref) + vm->clear_range(vm, addr, BIT_ULL(size)); } i915_gem_object_unpin_pages(obj); @@ -875,6 +871,15 @@ static int __shrink_hole(struct drm_i915_private *i915, i915_vma_unpin(vma); addr += size; + /* + * Since we are injecting allocation faults at random intervals, + * wait for this allocation to complete before we change the + * faultinjection. 
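The new comment notes that binding may now complete asynchronously, so the selftest must settle the vma before moving the fault-injection point. A rough per-iteration sketch of that pattern follows; pin_and_settle, obj, addr and flags are placeholder names and the surrounding loop's error handling is omitted.

/*
 * Sketch (not the selftest itself): pin the vma at a fixed offset,
 * then wait for any asynchronous bind to land before the caller
 * re-arms fault injection for the next iteration.
 */
static int pin_and_settle(struct drm_i915_gem_object *obj,
			  struct i915_address_space *vm,
			  u64 addr, u64 flags)
{
	struct i915_vma *vma;
	int err;

	vma = i915_vma_instance(obj, vm, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	err = i915_vma_pin(vma, 0, 0, addr | flags);
	if (err)
		return err;
	i915_vma_unpin(vma);

	/* serialise on the async bind worker */
	return i915_vma_sync(vma);
}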
+ */ + err = i915_vma_sync(vma); + if (err) + break; + if (igt_timeout(end_time, "%s timed out at ofset %llx [%llx - %llx]\n", __func__, addr, hole_start, hole_end)) { @@ -1008,21 +1013,19 @@ static int exercise_ppgtt(struct drm_i915_private *dev_priv, if (IS_ERR(file)) return PTR_ERR(file); - mutex_lock(&dev_priv->drm.struct_mutex); ppgtt = i915_ppgtt_create(dev_priv); if (IS_ERR(ppgtt)) { err = PTR_ERR(ppgtt); - goto out_unlock; + goto out_free; } GEM_BUG_ON(offset_in_page(ppgtt->vm.total)); - GEM_BUG_ON(ppgtt->vm.closed); + GEM_BUG_ON(!atomic_read(&ppgtt->vm.open)); err = func(dev_priv, &ppgtt->vm, 0, ppgtt->vm.total, end_time); i915_vm_put(&ppgtt->vm); -out_unlock: - mutex_unlock(&dev_priv->drm.struct_mutex); +out_free: mock_file_free(dev_priv, file); return err; } @@ -1085,7 +1088,6 @@ static int exercise_ggtt(struct drm_i915_private *i915, IGT_TIMEOUT(end_time); int err = 0; - mutex_lock(&i915->drm.struct_mutex); restart: list_sort(NULL, &ggtt->vm.mm.hole_stack, sort_holes); drm_mm_for_each_hole(node, &ggtt->vm.mm, hole_start, hole_end) { @@ -1106,7 +1108,6 @@ restart: last = hole_end; goto restart; } - mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -1148,13 +1149,12 @@ static int igt_ggtt_page(void *arg) unsigned int *order, n; int err; - mutex_lock(&i915->drm.struct_mutex); + if (!i915_ggtt_has_aperture(ggtt)) + return 0; obj = i915_gem_object_create_internal(i915, PAGE_SIZE); - if (IS_ERR(obj)) { - err = PTR_ERR(obj); - goto out_unlock; - } + if (IS_ERR(obj)) + return PTR_ERR(obj); err = i915_gem_object_pin_pages(obj); if (err) @@ -1222,8 +1222,6 @@ out_unpin: i915_gem_object_unpin_pages(obj); out_free: i915_gem_object_put(obj); -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -1234,10 +1232,13 @@ static void track_vma_bind(struct i915_vma *vma) atomic_inc(&obj->bind_count); /* track for eviction later */ __i915_gem_object_pin_pages(obj); + GEM_BUG_ON(vma->pages); + atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE); + __i915_gem_object_pin_pages(obj); vma->pages = obj->mm.pages; mutex_lock(&vma->vm->mutex); - list_move_tail(&vma->vm_link, &vma->vm->bound_list); + list_add_tail(&vma->vm_link, &vma->vm->bound_list); mutex_unlock(&vma->vm->mutex); } @@ -1248,6 +1249,7 @@ static int exercise_mock(struct drm_i915_private *i915, unsigned long end_time)) { const u64 limit = totalram_pages() << PAGE_SHIFT; + struct i915_address_space *vm; struct i915_gem_context *ctx; IGT_TIMEOUT(end_time); int err; @@ -1256,7 +1258,9 @@ static int exercise_mock(struct drm_i915_private *i915, if (!ctx) return -ENOMEM; - err = func(i915, ctx->vm, 0, min(ctx->vm->total, limit), end_time); + vm = i915_gem_context_get_vm_rcu(ctx); + err = func(i915, vm, 0, min(vm->total, limit), end_time); + i915_vm_put(vm); mock_context_close(ctx); return err; @@ -1294,6 +1298,7 @@ static int igt_gtt_reserve(void *arg) { struct i915_ggtt *ggtt = arg; struct drm_i915_gem_object *obj, *on; + I915_RND_STATE(prng); LIST_HEAD(objects); u64 total; int err = -ENODEV; @@ -1330,11 +1335,13 @@ static int igt_gtt_reserve(void *arg) goto out; } + mutex_lock(&ggtt->vm.mutex); err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, obj->base.size, total, obj->cache_level, 0); + mutex_unlock(&ggtt->vm.mutex); if (err) { pr_err("i915_gem_gtt_reserve (pass 1) failed at %llu/%llu with err=%d\n", total, ggtt->vm.total, err); @@ -1380,11 +1387,13 @@ static int igt_gtt_reserve(void *arg) goto out; } + mutex_lock(&ggtt->vm.mutex); err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, obj->base.size, total, 
obj->cache_level, 0); + mutex_unlock(&ggtt->vm.mutex); if (err) { pr_err("i915_gem_gtt_reserve (pass 2) failed at %llu/%llu with err=%d\n", total, ggtt->vm.total, err); @@ -1420,15 +1429,18 @@ static int igt_gtt_reserve(void *arg) goto out; } - offset = random_offset(0, ggtt->vm.total, - 2*I915_GTT_PAGE_SIZE, - I915_GTT_MIN_ALIGNMENT); + offset = igt_random_offset(&prng, + 0, ggtt->vm.total, + 2 * I915_GTT_PAGE_SIZE, + I915_GTT_MIN_ALIGNMENT); + mutex_lock(&ggtt->vm.mutex); err = i915_gem_gtt_reserve(&ggtt->vm, &vma->node, obj->base.size, offset, obj->cache_level, 0); + mutex_unlock(&ggtt->vm.mutex); if (err) { pr_err("i915_gem_gtt_reserve (pass 3) failed at %llu/%llu with err=%d\n", total, ggtt->vm.total, err); @@ -1497,11 +1509,13 @@ static int igt_gtt_insert(void *arg) /* Check a couple of obviously invalid requests */ for (ii = invalid_insert; ii->size; ii++) { + mutex_lock(&ggtt->vm.mutex); err = i915_gem_gtt_insert(&ggtt->vm, &tmp, ii->size, ii->alignment, I915_COLOR_UNEVICTABLE, ii->start, ii->end, 0); + mutex_unlock(&ggtt->vm.mutex); if (err != -ENOSPC) { pr_err("Invalid i915_gem_gtt_insert(.size=%llx, .alignment=%llx, .start=%llx, .end=%llx) succeeded (err=%d)\n", ii->size, ii->alignment, ii->start, ii->end, @@ -1537,10 +1551,12 @@ static int igt_gtt_insert(void *arg) goto out; } + mutex_lock(&ggtt->vm.mutex); err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, obj->base.size, 0, obj->cache_level, 0, ggtt->vm.total, 0); + mutex_unlock(&ggtt->vm.mutex); if (err == -ENOSPC) { /* maxed out the GGTT space */ i915_gem_object_put(obj); @@ -1595,10 +1611,12 @@ static int igt_gtt_insert(void *arg) goto out; } + mutex_lock(&ggtt->vm.mutex); err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, obj->base.size, 0, obj->cache_level, 0, ggtt->vm.total, 0); + mutex_unlock(&ggtt->vm.mutex); if (err) { pr_err("i915_gem_gtt_insert (pass 2) failed at %llu/%llu with err=%d\n", total, ggtt->vm.total, err); @@ -1642,10 +1660,12 @@ static int igt_gtt_insert(void *arg) goto out; } + mutex_lock(&ggtt->vm.mutex); err = i915_gem_gtt_insert(&ggtt->vm, &vma->node, obj->base.size, 0, obj->cache_level, 0, ggtt->vm.total, 0); + mutex_unlock(&ggtt->vm.mutex); if (err) { pr_err("i915_gem_gtt_insert (pass 3) failed at %llu/%llu with err=%d\n", total, ggtt->vm.total, err); @@ -1689,13 +1709,10 @@ int i915_gem_gtt_mock_selftests(void) } mock_init_ggtt(i915, ggtt); - mutex_lock(&i915->drm.struct_mutex); err = i915_subtests(tests, ggtt); - mock_device_flush(i915); - mutex_unlock(&i915->drm.struct_mutex); + mock_device_flush(i915); i915_gem_drain_freed_objects(i915); - mock_fini_ggtt(ggtt); kfree(ggtt); out_put: @@ -1703,6 +1720,312 @@ out_put: return err; } +static int context_sync(struct intel_context *ce) +{ + struct i915_request *rq; + long timeout; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_get(rq); + i915_request_add(rq); + + timeout = i915_request_wait(rq, 0, HZ / 5); + i915_request_put(rq); + + return timeout < 0 ? -EIO : 0; +} + +static struct i915_request * +submit_batch(struct intel_context *ce, u64 addr) +{ + struct i915_request *rq; + int err; + + rq = intel_context_create_request(ce); + if (IS_ERR(rq)) + return rq; + + err = 0; + if (rq->engine->emit_init_breadcrumb) /* detect a hang */ + err = rq->engine->emit_init_breadcrumb(rq); + if (err == 0) + err = rq->engine->emit_bb_start(rq, addr, 0, 0); + + if (err == 0) + i915_request_get(rq); + i915_request_add(rq); + + return err ? 
ERR_PTR(err) : rq; +} + +static u32 *spinner(u32 *batch, int i) +{ + return batch + i * 64 / sizeof(*batch) + 4; +} + +static void end_spin(u32 *batch, int i) +{ + *spinner(batch, i) = MI_BATCH_BUFFER_END; + wmb(); +} + +static int igt_cs_tlb(void *arg) +{ + const unsigned int count = PAGE_SIZE / 64; + const unsigned int chunk_size = count * PAGE_SIZE; + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *bbe, *act, *out; + struct i915_gem_engines_iter it; + struct i915_address_space *vm; + struct i915_gem_context *ctx; + struct intel_context *ce; + struct drm_file *file; + struct i915_vma *vma; + I915_RND_STATE(prng); + unsigned int i; + u32 *result; + u32 *batch; + int err = 0; + + /* + * Our mission here is to fool the hardware to execute something + * from scratch as it has not seen the batch move (due to missing + * the TLB invalidate). + */ + + file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_unlock; + } + + vm = i915_gem_context_get_vm_rcu(ctx); + if (i915_is_ggtt(vm)) + goto out_vm; + + /* Create two pages; dummy we prefill the TLB, and intended */ + bbe = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(bbe)) { + err = PTR_ERR(bbe); + goto out_vm; + } + + batch = i915_gem_object_pin_map(bbe, I915_MAP_WC); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto out_put_bbe; + } + memset32(batch, MI_BATCH_BUFFER_END, PAGE_SIZE / sizeof(u32)); + i915_gem_object_flush_map(bbe); + i915_gem_object_unpin_map(bbe); + + act = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(act)) { + err = PTR_ERR(act); + goto out_put_bbe; + } + + /* Track the execution of each request by writing into different slot */ + batch = i915_gem_object_pin_map(act, I915_MAP_WC); + if (IS_ERR(batch)) { + err = PTR_ERR(batch); + goto out_put_act; + } + for (i = 0; i < count; i++) { + u32 *cs = batch + i * 64 / sizeof(*cs); + u64 addr = (vm->total - PAGE_SIZE) + i * sizeof(u32); + + GEM_BUG_ON(INTEL_GEN(i915) < 6); + cs[0] = MI_STORE_DWORD_IMM_GEN4; + if (INTEL_GEN(i915) >= 8) { + cs[1] = lower_32_bits(addr); + cs[2] = upper_32_bits(addr); + cs[3] = i; + cs[4] = MI_NOOP; + cs[5] = MI_BATCH_BUFFER_START_GEN8; + } else { + cs[1] = 0; + cs[2] = lower_32_bits(addr); + cs[3] = i; + cs[4] = MI_NOOP; + cs[5] = MI_BATCH_BUFFER_START; + } + } + + out = i915_gem_object_create_internal(i915, PAGE_SIZE); + if (IS_ERR(out)) { + err = PTR_ERR(out); + goto out_put_batch; + } + i915_gem_object_set_cache_coherency(out, I915_CACHING_CACHED); + + vma = i915_vma_instance(out, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_put_batch; + } + + err = i915_vma_pin(vma, 0, 0, + PIN_USER | + PIN_OFFSET_FIXED | + (vm->total - PAGE_SIZE)); + if (err) + goto out_put_out; + GEM_BUG_ON(vma->node.start != vm->total - PAGE_SIZE); + + result = i915_gem_object_pin_map(out, I915_MAP_WB); + if (IS_ERR(result)) { + err = PTR_ERR(result); + goto out_put_out; + } + + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + IGT_TIMEOUT(end_time); + unsigned long pass = 0; + + if (!intel_engine_can_store_dword(ce->engine)) + continue; + + while (!__igt_timeout(end_time, NULL)) { + struct i915_request *rq; + u64 offset; + + offset = igt_random_offset(&prng, + 0, vm->total - PAGE_SIZE, + chunk_size, PAGE_SIZE); + + err = vm->allocate_va_range(vm, offset, chunk_size); + if (err) + goto end; + + memset32(result, STACK_MAGIC, PAGE_SIZE / sizeof(u32)); + + vma = i915_vma_instance(bbe, vm, 
NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto end; + } + + err = vma->ops->set_pages(vma); + if (err) + goto end; + + /* Prime the TLB with the dummy pages */ + for (i = 0; i < count; i++) { + vma->node.start = offset + i * PAGE_SIZE; + vm->insert_entries(vm, vma, I915_CACHE_NONE, 0); + + rq = submit_batch(ce, vma->node.start); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto end; + } + i915_request_put(rq); + } + + vma->ops->clear_pages(vma); + + err = context_sync(ce); + if (err) { + pr_err("%s: dummy setup timed out\n", + ce->engine->name); + goto end; + } + + vma = i915_vma_instance(act, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto end; + } + + err = vma->ops->set_pages(vma); + if (err) + goto end; + + /* Replace the TLB with target batches */ + for (i = 0; i < count; i++) { + struct i915_request *rq; + u32 *cs = batch + i * 64 / sizeof(*cs); + u64 addr; + + vma->node.start = offset + i * PAGE_SIZE; + vm->insert_entries(vm, vma, I915_CACHE_NONE, 0); + + addr = vma->node.start + i * 64; + cs[4] = MI_NOOP; + cs[6] = lower_32_bits(addr); + cs[7] = upper_32_bits(addr); + wmb(); + + rq = submit_batch(ce, addr); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto end; + } + + /* Wait until the context chain has started */ + if (i == 0) { + while (READ_ONCE(result[i]) && + !i915_request_completed(rq)) + cond_resched(); + } else { + end_spin(batch, i - 1); + } + + i915_request_put(rq); + } + end_spin(batch, count - 1); + + vma->ops->clear_pages(vma); + + err = context_sync(ce); + if (err) { + pr_err("%s: writes timed out\n", + ce->engine->name); + goto end; + } + + for (i = 0; i < count; i++) { + if (result[i] != i) { + pr_err("%s: Write lost on pass %lu, at offset %llx, index %d, found %x, expected %x\n", + ce->engine->name, pass, + offset, i, result[i], i); + err = -EINVAL; + goto end; + } + } + + vm->clear_range(vm, offset, chunk_size); + pass++; + } + } +end: + if (igt_flush_test(i915)) + err = -EIO; + i915_gem_context_unlock_engines(ctx); + i915_gem_object_unpin_map(out); +out_put_out: + i915_gem_object_put(out); +out_put_batch: + i915_gem_object_unpin_map(act); +out_put_act: + i915_gem_object_put(act); +out_put_bbe: + i915_gem_object_put(bbe); +out_vm: + i915_vm_put(vm); +out_unlock: + mock_file_free(i915, file); + return err; +} + int i915_gem_gtt_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { @@ -1720,6 +2043,7 @@ int i915_gem_gtt_live_selftests(struct drm_i915_private *i915) SUBTEST(igt_ggtt_pot), SUBTEST(igt_ggtt_fill), SUBTEST(igt_ggtt_page), + SUBTEST(igt_cs_tlb), }; GEM_BUG_ON(offset_in_page(i915->ggtt.vm.total)); diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h index 1ccf0f731ac0..4b3cac73e291 100644 --- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h @@ -15,6 +15,9 @@ selftest(workarounds, intel_workarounds_live_selftests) selftest(gt_engines, intel_engine_live_selftests) selftest(gt_timelines, intel_timeline_live_selftests) selftest(gt_contexts, intel_context_live_selftests) +selftest(gt_lrc, intel_lrc_live_selftests) +selftest(gt_pm, intel_gt_pm_live_selftests) +selftest(gt_heartbeat, intel_heartbeat_live_selftests) selftest(requests, i915_request_live_selftests) selftest(active, i915_active_live_selftests) selftest(objects, i915_gem_object_live_selftests) @@ -30,6 +33,8 @@ selftest(gem_contexts, i915_gem_context_live_selftests) selftest(blt, 
i915_gem_object_blt_live_selftests) selftest(client, i915_gem_client_blt_live_selftests) selftest(reset, intel_reset_live_selftests) +selftest(memory_region, intel_memory_region_live_selftests) selftest(hangcheck, intel_hangcheck_live_selftests) selftest(execlists, intel_execlists_live_selftests) selftest(guc, intel_guc_live_selftest) +selftest(perf, i915_perf_live_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h index b88084fe3269..aa5a0e7f5d9e 100644 --- a/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h +++ b/drivers/gpu/drm/i915/selftests/i915_mock_selftests.h @@ -26,3 +26,4 @@ selftest(gtt, i915_gem_gtt_mock_selftests) selftest(hugepages, i915_gem_huge_page_mock_selftests) selftest(contexts, i915_gem_context_mock_selftests) selftest(buddy, i915_buddy_mock_selftests) +selftest(memory_region, intel_memory_region_mock_selftests) diff --git a/drivers/gpu/drm/i915/selftests/i915_perf.c b/drivers/gpu/drm/i915/selftests/i915_perf.c new file mode 100644 index 000000000000..aabd07f67e49 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/i915_perf.c @@ -0,0 +1,217 @@ +/* + * SPDX-License-Identifier: MIT + * + * Copyright © 2019 Intel Corporation + */ + +#include <linux/kref.h> + +#include "gem/i915_gem_pm.h" +#include "gt/intel_gt.h" + +#include "i915_selftest.h" + +#include "igt_flush_test.h" +#include "lib_sw_fence.h" + +static struct i915_perf_stream * +test_stream(struct i915_perf *perf) +{ + struct drm_i915_perf_open_param param = {}; + struct perf_open_properties props = { + .engine = intel_engine_lookup_user(perf->i915, + I915_ENGINE_CLASS_RENDER, + 0), + .sample_flags = SAMPLE_OA_REPORT, + .oa_format = IS_GEN(perf->i915, 12) ? + I915_OA_FORMAT_A32u40_A4u32_B8_C8 : I915_OA_FORMAT_C4_B8, + .metrics_set = 1, + }; + struct i915_perf_stream *stream; + + stream = kzalloc(sizeof(*stream), GFP_KERNEL); + if (!stream) + return NULL; + + stream->perf = perf; + + mutex_lock(&perf->lock); + if (i915_oa_stream_init(stream, ¶m, &props)) { + kfree(stream); + stream = NULL; + } + mutex_unlock(&perf->lock); + + return stream; +} + +static void stream_destroy(struct i915_perf_stream *stream) +{ + struct i915_perf *perf = stream->perf; + + mutex_lock(&perf->lock); + i915_perf_destroy_locked(stream); + mutex_unlock(&perf->lock); +} + +static int live_sanitycheck(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_perf_stream *stream; + + /* Quick check we can create a perf stream */ + + stream = test_stream(&i915->perf); + if (!stream) + return -EINVAL; + + stream_destroy(stream); + return 0; +} + +static int write_timestamp(struct i915_request *rq, int slot) +{ + u32 *cs; + int len; + + cs = intel_ring_begin(rq, 6); + if (IS_ERR(cs)) + return PTR_ERR(cs); + + len = 5; + if (INTEL_GEN(rq->i915) >= 8) + len++; + + *cs++ = GFX_OP_PIPE_CONTROL(len); + *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | + PIPE_CONTROL_STORE_DATA_INDEX | + PIPE_CONTROL_WRITE_TIMESTAMP; + *cs++ = slot * sizeof(u32); + *cs++ = 0; + *cs++ = 0; + *cs++ = 0; + + intel_ring_advance(rq, cs); + + return 0; +} + +static ktime_t poll_status(struct i915_request *rq, int slot) +{ + while (!intel_read_status_page(rq->engine, slot) && + !i915_request_completed(rq)) + cpu_relax(); + + return ktime_get(); +} + +static int live_noa_delay(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct i915_perf_stream *stream; + struct i915_request *rq; + ktime_t t0, t1; + u64 expected; + u32 delay; + int err; + int i; + + /* Check that the GPU delays matches 
expectations */ + + stream = test_stream(&i915->perf); + if (!stream) + return -ENOMEM; + + expected = atomic64_read(&stream->perf->noa_programming_delay); + + if (stream->engine->class != RENDER_CLASS) { + err = -ENODEV; + goto out; + } + + for (i = 0; i < 4; i++) + intel_write_status_page(stream->engine, 0x100 + i, 0); + + rq = i915_request_create(stream->engine->kernel_context); + if (IS_ERR(rq)) { + err = PTR_ERR(rq); + goto out; + } + + if (rq->engine->emit_init_breadcrumb && + i915_request_timeline(rq)->has_initial_breadcrumb) { + err = rq->engine->emit_init_breadcrumb(rq); + if (err) { + i915_request_add(rq); + goto out; + } + } + + err = write_timestamp(rq, 0x100); + if (err) { + i915_request_add(rq); + goto out; + } + + err = rq->engine->emit_bb_start(rq, + i915_ggtt_offset(stream->noa_wait), 0, + I915_DISPATCH_SECURE); + if (err) { + i915_request_add(rq); + goto out; + } + + err = write_timestamp(rq, 0x102); + if (err) { + i915_request_add(rq); + goto out; + } + + i915_request_get(rq); + i915_request_add(rq); + + preempt_disable(); + t0 = poll_status(rq, 0x100); + t1 = poll_status(rq, 0x102); + preempt_enable(); + + pr_info("CPU delay: %lluns, expected %lluns\n", + ktime_sub(t1, t0), expected); + + delay = intel_read_status_page(stream->engine, 0x102); + delay -= intel_read_status_page(stream->engine, 0x100); + delay = div_u64(mul_u32_u32(delay, 1000 * 1000), + RUNTIME_INFO(i915)->cs_timestamp_frequency_khz); + pr_info("GPU delay: %uns, expected %lluns\n", + delay, expected); + + if (4 * delay < 3 * expected || 2 * delay > 3 * expected) { + pr_err("GPU delay [%uus] outside of expected threshold! [%lluus, %lluus]\n", + delay / 1000, + div_u64(3 * expected, 4000), + div_u64(3 * expected, 2000)); + err = -EINVAL; + } + + i915_request_put(rq); +out: + stream_destroy(stream); + return err; +} + +int i915_perf_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(live_sanitycheck), + SUBTEST(live_noa_delay), + }; + struct i915_perf *perf = &i915->perf; + + if (!perf->metrics_kobj || !perf->ops.enable_metric_set) + return 0; + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return i915_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/selftests/i915_random.c b/drivers/gpu/drm/i915/selftests/i915_random.c index 716a3f19f030..abdfadcf626b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.c +++ b/drivers/gpu/drm/i915/selftests/i915_random.c @@ -29,6 +29,7 @@ #include <linux/types.h> #include "i915_random.h" +#include "i915_utils.h" u64 i915_prandom_u64_state(struct rnd_state *rnd) { @@ -87,3 +88,22 @@ unsigned int *i915_random_order(unsigned int count, struct rnd_state *state) i915_random_reorder(order, count, state); return order; } + +u64 igt_random_offset(struct rnd_state *state, + u64 start, u64 end, + u64 len, u64 align) +{ + u64 range, addr; + + BUG_ON(range_overflows(start, len, end)); + BUG_ON(round_up(start, align) > round_down(end - len, align)); + + range = round_down(end - len, align) - round_up(start, align); + if (range) { + addr = i915_prandom_u64_state(state); + div64_u64_rem(addr, range, &addr); + start += addr; + } + + return round_up(start, align); +} diff --git a/drivers/gpu/drm/i915/selftests/i915_random.h b/drivers/gpu/drm/i915/selftests/i915_random.h index 8e1ff9c105b6..35cc69a3a1b9 100644 --- a/drivers/gpu/drm/i915/selftests/i915_random.h +++ b/drivers/gpu/drm/i915/selftests/i915_random.h @@ -57,4 +57,8 @@ void i915_random_reorder(unsigned int *order, void i915_prandom_shuffle(void *arr, size_t 
elsz, size_t count, struct rnd_state *state); +u64 igt_random_offset(struct rnd_state *state, + u64 start, u64 end, + u64 len, u64 align); + #endif /* !__I915_SELFTESTS_RANDOM_H__ */ diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index b3688543ed7d..8618a4dc0701 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -37,25 +37,32 @@ #include "mock_drm.h" #include "mock_gem_device.h" +static unsigned int num_uabi_engines(struct drm_i915_private *i915) +{ + struct intel_engine_cs *engine; + unsigned int count; + + count = 0; + for_each_uabi_engine(engine, i915) + count++; + + return count; +} + static int igt_add_request(void *arg) { struct drm_i915_private *i915 = arg; struct i915_request *request; - int err = -ENOMEM; /* Basic preliminary test to create a request and let it loose! */ - mutex_lock(&i915->drm.struct_mutex); request = mock_request(i915->engine[RCS0]->kernel_context, HZ / 10); if (!request) - goto out_unlock; + return -ENOMEM; i915_request_add(request); - err = 0; -out_unlock: - mutex_unlock(&i915->drm.struct_mutex); - return err; + return 0; } static int igt_wait_request(void *arg) @@ -67,12 +74,10 @@ static int igt_wait_request(void *arg) /* Submit a request, then wait upon it */ - mutex_lock(&i915->drm.struct_mutex); request = mock_request(i915->engine[RCS0]->kernel_context, T); - if (!request) { - err = -ENOMEM; - goto out_unlock; - } + if (!request) + return -ENOMEM; + i915_request_get(request); if (i915_request_wait(request, 0, 0) != -ETIME) { @@ -125,9 +130,7 @@ static int igt_wait_request(void *arg) err = 0; out_request: i915_request_put(request); -out_unlock: mock_device_flush(i915); - mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -140,52 +143,45 @@ static int igt_fence_wait(void *arg) /* Submit a request, treat it as a fence and wait upon it */ - mutex_lock(&i915->drm.struct_mutex); request = mock_request(i915->engine[RCS0]->kernel_context, T); - if (!request) { - err = -ENOMEM; - goto out_locked; - } + if (!request) + return -ENOMEM; if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) { pr_err("fence wait success before submit (expected timeout)!\n"); - goto out_locked; + goto out; } i915_request_add(request); - mutex_unlock(&i915->drm.struct_mutex); if (dma_fence_is_signaled(&request->fence)) { pr_err("fence signaled immediately!\n"); - goto out_device; + goto out; } if (dma_fence_wait_timeout(&request->fence, false, T / 2) != -ETIME) { pr_err("fence wait success after submit (expected timeout)!\n"); - goto out_device; + goto out; } if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) { pr_err("fence wait timed out (expected success)!\n"); - goto out_device; + goto out; } if (!dma_fence_is_signaled(&request->fence)) { pr_err("fence unsignaled after waiting!\n"); - goto out_device; + goto out; } if (dma_fence_wait_timeout(&request->fence, false, T) <= 0) { pr_err("fence wait timed out when complete (expected success)!\n"); - goto out_device; + goto out; } err = 0; -out_device: - mutex_lock(&i915->drm.struct_mutex); -out_locked: +out: mock_device_flush(i915); - mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -197,8 +193,8 @@ static int igt_request_rewind(void *arg) struct intel_context *ce; int err = -EINVAL; - mutex_lock(&i915->drm.struct_mutex); ctx[0] = mock_context(i915, "A"); + ce = i915_gem_context_get_engine(ctx[0], RCS0); GEM_BUG_ON(IS_ERR(ce)); request = mock_request(ce, 2 * HZ); @@ -212,6 +208,7 @@ 
static int igt_request_rewind(void *arg) i915_request_add(request); ctx[1] = mock_context(i915, "B"); + ce = i915_gem_context_get_engine(ctx[1], RCS0); GEM_BUG_ON(IS_ERR(ce)); vip = mock_request(ce, 0); @@ -233,7 +230,6 @@ static int igt_request_rewind(void *arg) request->engine->submit_request(request); rcu_read_unlock(); - mutex_unlock(&i915->drm.struct_mutex); if (i915_request_wait(vip, 0, HZ) == -ETIME) { pr_err("timed out waiting for high priority request\n"); @@ -248,14 +244,12 @@ static int igt_request_rewind(void *arg) err = 0; err: i915_request_put(vip); - mutex_lock(&i915->drm.struct_mutex); err_context_1: mock_context_close(ctx[1]); i915_request_put(request); err_context_0: mock_context_close(ctx[0]); mock_device_flush(i915); - mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -282,7 +276,6 @@ __live_request_alloc(struct intel_context *ce) static int __igt_breadcrumbs_smoketest(void *arg) { struct smoketest *t = arg; - struct mutex * const BKL = &t->engine->i915->drm.struct_mutex; const unsigned int max_batch = min(t->ncontexts, t->max_batch) - 1; const unsigned int total = 4 * t->ncontexts + 1; unsigned int num_waits = 0, num_fences = 0; @@ -300,7 +293,7 @@ static int __igt_breadcrumbs_smoketest(void *arg) * that the fences were marked as signaled. */ - requests = kmalloc_array(total, sizeof(*requests), GFP_KERNEL); + requests = kcalloc(total, sizeof(*requests), GFP_KERNEL); if (!requests) return -ENOMEM; @@ -337,14 +330,11 @@ static int __igt_breadcrumbs_smoketest(void *arg) struct i915_request *rq; struct intel_context *ce; - mutex_lock(BKL); - ce = i915_gem_context_get_engine(ctx, t->engine->legacy_idx); GEM_BUG_ON(IS_ERR(ce)); rq = t->request_alloc(ce); intel_context_put(ce); if (IS_ERR(rq)) { - mutex_unlock(BKL); err = PTR_ERR(rq); count = n; break; @@ -357,8 +347,6 @@ static int __igt_breadcrumbs_smoketest(void *arg) requests[n] = i915_request_get(rq); i915_request_add(rq); - mutex_unlock(BKL); - if (err >= 0) err = i915_sw_fence_await_dma_fence(wait, &rq->fence, @@ -446,18 +434,16 @@ static int mock_breadcrumbs_smoketest(void *arg) * See __igt_breadcrumbs_smoketest(); */ - threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL); + threads = kcalloc(ncpus, sizeof(*threads), GFP_KERNEL); if (!threads) return -ENOMEM; - t.contexts = - kmalloc_array(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL); + t.contexts = kcalloc(t.ncontexts, sizeof(*t.contexts), GFP_KERNEL); if (!t.contexts) { ret = -ENOMEM; goto out_threads; } - mutex_lock(&t.engine->i915->drm.struct_mutex); for (n = 0; n < t.ncontexts; n++) { t.contexts[n] = mock_context(t.engine->i915, "mock"); if (!t.contexts[n]) { @@ -465,7 +451,6 @@ static int mock_breadcrumbs_smoketest(void *arg) goto out_contexts; } } - mutex_unlock(&t.engine->i915->drm.struct_mutex); for (n = 0; n < ncpus; n++) { threads[n] = kthread_run(__igt_breadcrumbs_smoketest, @@ -479,6 +464,7 @@ static int mock_breadcrumbs_smoketest(void *arg) get_task_struct(threads[n]); } + yield(); /* start all threads before we begin */ msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies)); for (n = 0; n < ncpus; n++) { @@ -495,18 +481,15 @@ static int mock_breadcrumbs_smoketest(void *arg) atomic_long_read(&t.num_fences), ncpus); - mutex_lock(&t.engine->i915->drm.struct_mutex); out_contexts: for (n = 0; n < t.ncontexts; n++) { if (!t.contexts[n]) break; mock_context_close(t.contexts[n]); } - mutex_unlock(&t.engine->i915->drm.struct_mutex); kfree(t.contexts); out_threads: kfree(threads); - return ret; } @@ -539,40 +522,37 @@ static int 
live_nop_request(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; - intel_wakeref_t wakeref; struct igt_live_test t; - unsigned int id; int err = -ENODEV; - /* Submit various sized batches of empty requests, to each engine + /* + * Submit various sized batches of empty requests, to each engine * (individually), and wait for the batch to complete. We can check * the overhead of submitting requests to the hardware. */ - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - - for_each_engine(engine, i915, id) { - struct i915_request *request = NULL; + for_each_uabi_engine(engine, i915) { unsigned long n, prime; IGT_TIMEOUT(end_time); ktime_t times[2] = {}; err = igt_live_test_begin(&t, i915, __func__, engine->name); if (err) - goto out_unlock; + return err; for_each_prime_number_from(prime, 1, 8192) { + struct i915_request *request = NULL; + times[1] = ktime_get_raw(); for (n = 0; n < prime; n++) { + i915_request_put(request); request = i915_request_create(engine->kernel_context); - if (IS_ERR(request)) { - err = PTR_ERR(request); - goto out_unlock; - } + if (IS_ERR(request)) + return PTR_ERR(request); - /* This space is left intentionally blank. + /* + * This space is left intentionally blank. * * We do not actually want to perform any * action with this request, we just want @@ -585,9 +565,11 @@ static int live_nop_request(void *arg) * for latency. */ + i915_request_get(request); i915_request_add(request); } i915_request_wait(request, 0, MAX_SCHEDULE_TIMEOUT); + i915_request_put(request); times[1] = ktime_sub(ktime_get_raw(), times[1]); if (prime == 1) @@ -599,7 +581,7 @@ static int live_nop_request(void *arg) err = igt_live_test_end(&t); if (err) - goto out_unlock; + return err; pr_info("Request latencies on %s: 1 = %lluns, %lu = %lluns\n", engine->name, @@ -607,9 +589,6 @@ static int live_nop_request(void *arg) prime, div64_u64(ktime_to_ns(times[1]), prime)); } -out_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -647,8 +626,15 @@ static struct i915_vma *empty_batch(struct drm_i915_private *i915) if (err) goto err; + /* Force the wait wait now to avoid including it in the benchmark */ + err = i915_vma_sync(vma); + if (err) + goto err_pin; + return vma; +err_pin: + i915_vma_unpin(vma); err: i915_gem_object_put(obj); return ERR_PTR(err); @@ -672,6 +658,7 @@ empty_request(struct intel_engine_cs *engine, if (err) goto out_request; + i915_request_get(request); out_request: i915_request_add(request); return err ? ERR_PTR(err) : request; @@ -681,27 +668,21 @@ static int live_empty_request(void *arg) { struct drm_i915_private *i915 = arg; struct intel_engine_cs *engine; - intel_wakeref_t wakeref; struct igt_live_test t; struct i915_vma *batch; - unsigned int id; int err = 0; - /* Submit various sized batches of empty requests, to each engine + /* + * Submit various sized batches of empty requests, to each engine * (individually), and wait for the batch to complete. We can check * the overhead of submitting requests to the hardware. 
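To make the benchmarking pattern described in the comment above concrete, here is a condensed, hypothetical restatement of the measurement loop: submit `prime` back-to-back nop requests, wait only on the last one, and keep the single-request timing separate so fixed per-batch overhead can be distinguished from per-request cost. measure_nop_latency is an invented name, and the igt_live_test bookkeeping of the real test is dropped.

static int measure_nop_latency(struct intel_engine_cs *engine)
{
	ktime_t times[2] = {};
	unsigned long prime, n;
	IGT_TIMEOUT(end_time);

	for_each_prime_number_from(prime, 1, 8192) {
		struct i915_request *rq = NULL;

		times[1] = ktime_get_raw();
		for (n = 0; n < prime; n++) {
			if (rq)
				i915_request_put(rq); /* drop our ref on the previous one */

			rq = i915_request_create(engine->kernel_context);
			if (IS_ERR(rq))
				return PTR_ERR(rq);

			i915_request_get(rq);
			i915_request_add(rq);
		}

		/* only the last request needs waiting on; all earlier ones
		 * must already have retired for it to complete */
		i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
		i915_request_put(rq);

		times[1] = ktime_sub(ktime_get_raw(), times[1]);
		if (prime == 1)
			times[0] = times[1];

		if (__igt_timeout(end_time, NULL))
			break;
	}

	pr_info("%s: 1 = %lluns, %lu = %lluns\n", engine->name,
		ktime_to_ns(times[0]),
		prime, div64_u64(ktime_to_ns(times[1]), prime));
	return 0;
}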
*/ - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); - batch = empty_batch(i915); - if (IS_ERR(batch)) { - err = PTR_ERR(batch); - goto out_unlock; - } + if (IS_ERR(batch)) + return PTR_ERR(batch); - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { IGT_TIMEOUT(end_time); struct i915_request *request; unsigned long n, prime; @@ -723,6 +704,7 @@ static int live_empty_request(void *arg) times[1] = ktime_get_raw(); for (n = 0; n < prime; n++) { + i915_request_put(request); request = empty_request(engine, batch); if (IS_ERR(request)) { err = PTR_ERR(request); @@ -738,6 +720,7 @@ static int live_empty_request(void *arg) if (__igt_timeout(end_time, NULL)) break; } + i915_request_put(request); err = igt_live_test_end(&t); if (err) @@ -752,18 +735,15 @@ static int live_empty_request(void *arg) out_batch: i915_vma_unpin(batch); i915_vma_put(batch); -out_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); return err; } static struct i915_vma *recursive_batch(struct drm_i915_private *i915) { struct i915_gem_context *ctx = i915->kernel_context; - struct i915_address_space *vm = ctx->vm ?: &i915->ggtt.vm; struct drm_i915_gem_object *obj; const int gen = INTEL_GEN(i915); + struct i915_address_space *vm; struct i915_vma *vma; u32 *cmd; int err; @@ -772,7 +752,9 @@ static struct i915_vma *recursive_batch(struct drm_i915_private *i915) if (IS_ERR(obj)) return ERR_CAST(obj); + vm = i915_gem_context_get_vm_rcu(ctx); vma = i915_vma_instance(obj, vm, NULL); + i915_vm_put(vm); if (IS_ERR(vma)) { err = PTR_ERR(vma); goto err; @@ -832,67 +814,73 @@ static int recursive_batch_resolve(struct i915_vma *batch) static int live_all_engines(void *arg) { struct drm_i915_private *i915 = arg; + const unsigned int nengines = num_uabi_engines(i915); struct intel_engine_cs *engine; - struct i915_request *request[I915_NUM_ENGINES]; - intel_wakeref_t wakeref; + struct i915_request **request; struct igt_live_test t; struct i915_vma *batch; - unsigned int id; + unsigned int idx; int err; - /* Check we can submit requests to all engines simultaneously. We + /* + * Check we can submit requests to all engines simultaneously. We * send a recursive batch to each engine - checking that we don't * block doing so, and that they don't complete too soon. 
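The change that follows replaces the fixed request[I915_NUM_ENGINES] array with a kcalloc'd array sized from the uabi engine list plus a manual index. A minimal sketch of that bookkeeping pattern, assuming the usual i915 selftest includes, is below; flush_all_engines is an invented helper, and the real test additionally submits a recursive batch and checks that no request completes too early.

static int flush_all_engines(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_request **request;
	unsigned int idx, nengines = 0;
	int err = 0;

	for_each_uabi_engine(engine, i915)
		nengines++;

	/* sized at run time rather than the old I915_NUM_ENGINES array */
	request = kcalloc(nengines, sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	idx = 0;
	for_each_uabi_engine(engine, i915) {
		request[idx] = i915_request_create(engine->kernel_context);
		if (IS_ERR(request[idx])) {
			err = PTR_ERR(request[idx]);
			break;
		}

		i915_request_get(request[idx]);
		i915_request_add(request[idx]);
		idx++;
	}

	/* wait for and release only the requests we actually submitted */
	while (idx--) {
		if (err == 0 &&
		    i915_request_wait(request[idx], 0,
				      MAX_SCHEDULE_TIMEOUT) < 0)
			err = -EIO;
		i915_request_put(request[idx]);
	}

	kfree(request);
	return err;
}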
*/ - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + request = kcalloc(nengines, sizeof(*request), GFP_KERNEL); + if (!request) + return -ENOMEM; err = igt_live_test_begin(&t, i915, __func__, ""); if (err) - goto out_unlock; + goto out_free; batch = recursive_batch(i915); if (IS_ERR(batch)) { err = PTR_ERR(batch); pr_err("%s: Unable to create batch, err=%d\n", __func__, err); - goto out_unlock; + goto out_free; } - for_each_engine(engine, i915, id) { - request[id] = i915_request_create(engine->kernel_context); - if (IS_ERR(request[id])) { - err = PTR_ERR(request[id]); + idx = 0; + for_each_uabi_engine(engine, i915) { + request[idx] = i915_request_create(engine->kernel_context); + if (IS_ERR(request[idx])) { + err = PTR_ERR(request[idx]); pr_err("%s: Request allocation failed with err=%d\n", __func__, err); goto out_request; } - err = engine->emit_bb_start(request[id], + err = engine->emit_bb_start(request[idx], batch->node.start, batch->node.size, 0); GEM_BUG_ON(err); - request[id]->batch = batch; + request[idx]->batch = batch; i915_vma_lock(batch); - err = i915_request_await_object(request[id], batch->obj, 0); + err = i915_request_await_object(request[idx], batch->obj, 0); if (err == 0) - err = i915_vma_move_to_active(batch, request[id], 0); + err = i915_vma_move_to_active(batch, request[idx], 0); i915_vma_unlock(batch); GEM_BUG_ON(err); - i915_request_get(request[id]); - i915_request_add(request[id]); + i915_request_get(request[idx]); + i915_request_add(request[idx]); + idx++; } - for_each_engine(engine, i915, id) { - if (i915_request_completed(request[id])) { + idx = 0; + for_each_uabi_engine(engine, i915) { + if (i915_request_completed(request[idx])) { pr_err("%s(%s): request completed too early!\n", __func__, engine->name); err = -EINVAL; goto out_request; } + idx++; } err = recursive_batch_resolve(batch); @@ -901,10 +889,11 @@ static int live_all_engines(void *arg) goto out_request; } - for_each_engine(engine, i915, id) { + idx = 0; + for_each_uabi_engine(engine, i915) { long timeout; - timeout = i915_request_wait(request[id], 0, + timeout = i915_request_wait(request[idx], 0, MAX_SCHEDULE_TIMEOUT); if (timeout < 0) { err = timeout; @@ -913,50 +902,56 @@ static int live_all_engines(void *arg) goto out_request; } - GEM_BUG_ON(!i915_request_completed(request[id])); - i915_request_put(request[id]); - request[id] = NULL; + GEM_BUG_ON(!i915_request_completed(request[idx])); + i915_request_put(request[idx]); + request[idx] = NULL; + idx++; } err = igt_live_test_end(&t); out_request: - for_each_engine(engine, i915, id) - if (request[id]) - i915_request_put(request[id]); + idx = 0; + for_each_uabi_engine(engine, i915) { + if (request[idx]) + i915_request_put(request[idx]); + idx++; + } i915_vma_unpin(batch); i915_vma_put(batch); -out_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); +out_free: + kfree(request); return err; } static int live_sequential_engines(void *arg) { struct drm_i915_private *i915 = arg; - struct i915_request *request[I915_NUM_ENGINES] = {}; + const unsigned int nengines = num_uabi_engines(i915); + struct i915_request **request; struct i915_request *prev = NULL; struct intel_engine_cs *engine; - intel_wakeref_t wakeref; struct igt_live_test t; - unsigned int id; + unsigned int idx; int err; - /* Check we can submit requests to all engines sequentially, such + /* + * Check we can submit requests to all engines sequentially, such * that each successive request waits for the 
earlier ones. This * tests that we don't execute requests out of order, even though * they are running on independent engines. */ - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); + request = kcalloc(nengines, sizeof(*request), GFP_KERNEL); + if (!request) + return -ENOMEM; err = igt_live_test_begin(&t, i915, __func__, ""); if (err) - goto out_unlock; + goto out_free; - for_each_engine(engine, i915, id) { + idx = 0; + for_each_uabi_engine(engine, i915) { struct i915_vma *batch; batch = recursive_batch(i915); @@ -964,66 +959,69 @@ static int live_sequential_engines(void *arg) err = PTR_ERR(batch); pr_err("%s: Unable to create batch for %s, err=%d\n", __func__, engine->name, err); - goto out_unlock; + goto out_free; } - request[id] = i915_request_create(engine->kernel_context); - if (IS_ERR(request[id])) { - err = PTR_ERR(request[id]); + request[idx] = i915_request_create(engine->kernel_context); + if (IS_ERR(request[idx])) { + err = PTR_ERR(request[idx]); pr_err("%s: Request allocation failed for %s with err=%d\n", __func__, engine->name, err); goto out_request; } if (prev) { - err = i915_request_await_dma_fence(request[id], + err = i915_request_await_dma_fence(request[idx], &prev->fence); if (err) { - i915_request_add(request[id]); + i915_request_add(request[idx]); pr_err("%s: Request await failed for %s with err=%d\n", __func__, engine->name, err); goto out_request; } } - err = engine->emit_bb_start(request[id], + err = engine->emit_bb_start(request[idx], batch->node.start, batch->node.size, 0); GEM_BUG_ON(err); - request[id]->batch = batch; + request[idx]->batch = batch; i915_vma_lock(batch); - err = i915_request_await_object(request[id], batch->obj, false); + err = i915_request_await_object(request[idx], + batch->obj, false); if (err == 0) - err = i915_vma_move_to_active(batch, request[id], 0); + err = i915_vma_move_to_active(batch, request[idx], 0); i915_vma_unlock(batch); GEM_BUG_ON(err); - i915_request_get(request[id]); - i915_request_add(request[id]); + i915_request_get(request[idx]); + i915_request_add(request[idx]); - prev = request[id]; + prev = request[idx]; + idx++; } - for_each_engine(engine, i915, id) { + idx = 0; + for_each_uabi_engine(engine, i915) { long timeout; - if (i915_request_completed(request[id])) { + if (i915_request_completed(request[idx])) { pr_err("%s(%s): request completed too early!\n", __func__, engine->name); err = -EINVAL; goto out_request; } - err = recursive_batch_resolve(request[id]->batch); + err = recursive_batch_resolve(request[idx]->batch); if (err) { pr_err("%s: failed to resolve batch, err=%d\n", __func__, err); goto out_request; } - timeout = i915_request_wait(request[id], 0, + timeout = i915_request_wait(request[idx], 0, MAX_SCHEDULE_TIMEOUT); if (timeout < 0) { err = timeout; @@ -1032,33 +1030,156 @@ static int live_sequential_engines(void *arg) goto out_request; } - GEM_BUG_ON(!i915_request_completed(request[id])); + GEM_BUG_ON(!i915_request_completed(request[idx])); + idx++; } err = igt_live_test_end(&t); out_request: - for_each_engine(engine, i915, id) { + idx = 0; + for_each_uabi_engine(engine, i915) { u32 *cmd; - if (!request[id]) + if (!request[idx]) break; - cmd = i915_gem_object_pin_map(request[id]->batch->obj, + cmd = i915_gem_object_pin_map(request[idx]->batch->obj, I915_MAP_WC); if (!IS_ERR(cmd)) { *cmd = MI_BATCH_BUFFER_END; intel_gt_chipset_flush(engine->gt); - i915_gem_object_unpin_map(request[id]->batch->obj); + i915_gem_object_unpin_map(request[idx]->batch->obj); } - 
i915_vma_put(request[id]->batch); - i915_request_put(request[id]); + i915_vma_put(request[idx]->batch); + i915_request_put(request[idx]); + idx++; } -out_unlock: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); +out_free: + kfree(request); + return err; +} + +static int __live_parallel_engine1(void *arg) +{ + struct intel_engine_cs *engine = arg; + IGT_TIMEOUT(end_time); + unsigned long count; + + count = 0; + do { + struct i915_request *rq; + int err; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_get(rq); + i915_request_add(rq); + + err = 0; + if (i915_request_wait(rq, 0, HZ / 5) < 0) + err = -ETIME; + i915_request_put(rq); + if (err) + return err; + + count++; + } while (!__igt_timeout(end_time, NULL)); + + pr_info("%s: %lu request + sync\n", engine->name, count); + return 0; +} + +static int __live_parallel_engineN(void *arg) +{ + struct intel_engine_cs *engine = arg; + IGT_TIMEOUT(end_time); + unsigned long count; + + count = 0; + do { + struct i915_request *rq; + + rq = i915_request_create(engine->kernel_context); + if (IS_ERR(rq)) + return PTR_ERR(rq); + + i915_request_add(rq); + count++; + } while (!__igt_timeout(end_time, NULL)); + + pr_info("%s: %lu requests\n", engine->name, count); + return 0; +} + +static int live_parallel_engines(void *arg) +{ + struct drm_i915_private *i915 = arg; + static int (* const func[])(void *arg) = { + __live_parallel_engine1, + __live_parallel_engineN, + NULL, + }; + const unsigned int nengines = num_uabi_engines(i915); + struct intel_engine_cs *engine; + int (* const *fn)(void *arg); + struct task_struct **tsk; + int err = 0; + + /* + * Check we can submit requests to all engines concurrently. This + * tests that we load up the system maximally. 
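live_parallel_engines drives one kernel thread per engine. The sketch below shows the fan-out/collect shape it relies on: spawn a kthread per uabi engine, yield so they all get going, then kthread_stop() each and fold any error into the return value. run_on_all_engines is a hypothetical wrapper; the real test iterates a table of payload functions and brackets everything with igt_live_test begin/end.

#include <linux/kthread.h>
#include <linux/slab.h>

static int run_on_all_engines(struct drm_i915_private *i915,
			      int (*fn)(void *arg),
			      unsigned int nengines)
{
	struct intel_engine_cs *engine;
	struct task_struct **tsk;
	unsigned int idx = 0;
	int err = 0;

	tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL);
	if (!tsk)
		return -ENOMEM;

	for_each_uabi_engine(engine, i915) {
		tsk[idx] = kthread_run(fn, engine, "igt/parallel:%s",
				       engine->name);
		if (IS_ERR(tsk[idx])) {
			err = PTR_ERR(tsk[idx]);
			break;
		}
		get_task_struct(tsk[idx++]);
	}

	yield(); /* let every thread begin before we start stopping them */

	idx = 0;
	for_each_uabi_engine(engine, i915) {
		int status;

		if (IS_ERR_OR_NULL(tsk[idx]))
			break;

		status = kthread_stop(tsk[idx]); /* collect the payload's return value */
		if (status && !err)
			err = status;
		put_task_struct(tsk[idx++]);
	}

	kfree(tsk);
	return err;
}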
+ */ + + tsk = kcalloc(nengines, sizeof(*tsk), GFP_KERNEL); + if (!tsk) + return -ENOMEM; + + for (fn = func; !err && *fn; fn++) { + struct igt_live_test t; + unsigned int idx; + + err = igt_live_test_begin(&t, i915, __func__, ""); + if (err) + break; + + idx = 0; + for_each_uabi_engine(engine, i915) { + tsk[idx] = kthread_run(*fn, engine, + "igt/parallel:%s", + engine->name); + if (IS_ERR(tsk[idx])) { + err = PTR_ERR(tsk[idx]); + break; + } + get_task_struct(tsk[idx++]); + } + + yield(); /* start all threads before we kthread_stop() */ + + idx = 0; + for_each_uabi_engine(engine, i915) { + int status; + + if (IS_ERR(tsk[idx])) + break; + + status = kthread_stop(tsk[idx]); + if (status && !err) + err = status; + + put_task_struct(tsk[idx++]); + } + + if (igt_live_test_end(&t)) + err = -EIO; + } + + kfree(tsk); return err; } @@ -1102,16 +1223,16 @@ max_batches(struct i915_gem_context *ctx, struct intel_engine_cs *engine) static int live_breadcrumbs_smoketest(void *arg) { struct drm_i915_private *i915 = arg; - struct smoketest t[I915_NUM_ENGINES]; - unsigned int ncpus = num_online_cpus(); + const unsigned int nengines = num_uabi_engines(i915); + const unsigned int ncpus = num_online_cpus(); unsigned long num_waits, num_fences; struct intel_engine_cs *engine; struct task_struct **threads; struct igt_live_test live; - enum intel_engine_id id; intel_wakeref_t wakeref; struct drm_file *file; - unsigned int n; + struct smoketest *smoke; + unsigned int n, idx; int ret = 0; /* @@ -1130,29 +1251,31 @@ static int live_breadcrumbs_smoketest(void *arg) goto out_rpm; } - threads = kcalloc(ncpus * I915_NUM_ENGINES, - sizeof(*threads), - GFP_KERNEL); - if (!threads) { + smoke = kcalloc(nengines, sizeof(*smoke), GFP_KERNEL); + if (!smoke) { ret = -ENOMEM; goto out_file; } - memset(&t[0], 0, sizeof(t[0])); - t[0].request_alloc = __live_request_alloc; - t[0].ncontexts = 64; - t[0].contexts = kmalloc_array(t[0].ncontexts, - sizeof(*t[0].contexts), - GFP_KERNEL); - if (!t[0].contexts) { + threads = kcalloc(ncpus * nengines, sizeof(*threads), GFP_KERNEL); + if (!threads) { + ret = -ENOMEM; + goto out_smoke; + } + + smoke[0].request_alloc = __live_request_alloc; + smoke[0].ncontexts = 64; + smoke[0].contexts = kcalloc(smoke[0].ncontexts, + sizeof(*smoke[0].contexts), + GFP_KERNEL); + if (!smoke[0].contexts) { ret = -ENOMEM; goto out_threads; } - mutex_lock(&i915->drm.struct_mutex); - for (n = 0; n < t[0].ncontexts; n++) { - t[0].contexts[n] = live_context(i915, file); - if (!t[0].contexts[n]) { + for (n = 0; n < smoke[0].ncontexts; n++) { + smoke[0].contexts[n] = live_context(i915, file); + if (!smoke[0].contexts[n]) { ret = -ENOMEM; goto out_contexts; } @@ -1162,45 +1285,48 @@ static int live_breadcrumbs_smoketest(void *arg) if (ret) goto out_contexts; - for_each_engine(engine, i915, id) { - t[id] = t[0]; - t[id].engine = engine; - t[id].max_batch = max_batches(t[0].contexts[0], engine); - if (t[id].max_batch < 0) { - ret = t[id].max_batch; - mutex_unlock(&i915->drm.struct_mutex); + idx = 0; + for_each_uabi_engine(engine, i915) { + smoke[idx] = smoke[0]; + smoke[idx].engine = engine; + smoke[idx].max_batch = + max_batches(smoke[0].contexts[0], engine); + if (smoke[idx].max_batch < 0) { + ret = smoke[idx].max_batch; goto out_flush; } /* One ring interleaved between requests from all cpus */ - t[id].max_batch /= num_online_cpus() + 1; + smoke[idx].max_batch /= num_online_cpus() + 1; pr_debug("Limiting batches to %d requests on %s\n", - t[id].max_batch, engine->name); + smoke[idx].max_batch, engine->name); for (n = 
0; n < ncpus; n++) { struct task_struct *tsk; tsk = kthread_run(__igt_breadcrumbs_smoketest, - &t[id], "igt/%d.%d", id, n); + &smoke[idx], "igt/%d.%d", idx, n); if (IS_ERR(tsk)) { ret = PTR_ERR(tsk); - mutex_unlock(&i915->drm.struct_mutex); goto out_flush; } get_task_struct(tsk); - threads[id * ncpus + n] = tsk; + threads[idx * ncpus + n] = tsk; } + + idx++; } - mutex_unlock(&i915->drm.struct_mutex); + yield(); /* start all threads before we begin */ msleep(jiffies_to_msecs(i915_selftest.timeout_jiffies)); out_flush: + idx = 0; num_waits = 0; num_fences = 0; - for_each_engine(engine, i915, id) { + for_each_uabi_engine(engine, i915) { for (n = 0; n < ncpus; n++) { - struct task_struct *tsk = threads[id * ncpus + n]; + struct task_struct *tsk = threads[idx * ncpus + n]; int err; if (!tsk) @@ -1213,19 +1339,20 @@ out_flush: put_task_struct(tsk); } - num_waits += atomic_long_read(&t[id].num_waits); - num_fences += atomic_long_read(&t[id].num_fences); + num_waits += atomic_long_read(&smoke[idx].num_waits); + num_fences += atomic_long_read(&smoke[idx].num_fences); + idx++; } pr_info("Completed %lu waits for %lu fences across %d engines and %d cpus\n", num_waits, num_fences, RUNTIME_INFO(i915)->num_engines, ncpus); - mutex_lock(&i915->drm.struct_mutex); ret = igt_live_test_end(&live) ?: ret; out_contexts: - mutex_unlock(&i915->drm.struct_mutex); - kfree(t[0].contexts); + kfree(smoke[0].contexts); out_threads: kfree(threads); +out_smoke: + kfree(smoke); out_file: mock_file_free(i915, file); out_rpm: @@ -1240,6 +1367,7 @@ int i915_request_live_selftests(struct drm_i915_private *i915) SUBTEST(live_nop_request), SUBTEST(live_all_engines), SUBTEST(live_sequential_engines), + SUBTEST(live_parallel_engines), SUBTEST(live_empty_request), SUBTEST(live_breadcrumbs_smoketest), }; diff --git a/drivers/gpu/drm/i915/selftests/i915_selftest.c b/drivers/gpu/drm/i915/selftests/i915_selftest.c index 438ea0eaa416..a6cca4ad96f6 100644 --- a/drivers/gpu/drm/i915/selftests/i915_selftest.c +++ b/drivers/gpu/drm/i915/selftests/i915_selftest.c @@ -23,13 +23,14 @@ #include <linux/random.h> -#include "../i915_drv.h" -#include "../i915_selftest.h" +#include "gt/intel_gt_pm.h" +#include "i915_drv.h" +#include "i915_selftest.h" #include "igt_flush_test.h" struct i915_selftest i915_selftest __read_mostly = { - .timeout_ms = 1000, + .timeout_ms = 500, }; int i915_mock_sanitycheck(void) @@ -256,6 +257,10 @@ int __i915_live_setup(void *data) { struct drm_i915_private *i915 = data; + /* The selftests expect an idle system */ + if (intel_gt_pm_wait_for_idle(&i915->gt)) + return -EIO; + return intel_gt_terminally_wedged(&i915->gt); } @@ -263,10 +268,8 @@ int __i915_live_teardown(int err, void *data) { struct drm_i915_private *i915 = data; - mutex_lock(&i915->drm.struct_mutex); - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(i915)) err = -EIO; - mutex_unlock(&i915->drm.struct_mutex); i915_gem_drain_freed_objects(i915); @@ -277,6 +280,10 @@ int __intel_gt_live_setup(void *data) { struct intel_gt *gt = data; + /* The selftests expect an idle system */ + if (intel_gt_pm_wait_for_idle(gt)) + return -EIO; + return intel_gt_terminally_wedged(gt); } @@ -284,10 +291,8 @@ int __intel_gt_live_teardown(int err, void *data) { struct intel_gt *gt = data; - mutex_lock(>->i915->drm.struct_mutex); - if (igt_flush_test(gt->i915, I915_WAIT_LOCKED)) + if (igt_flush_test(gt->i915)) err = -EIO; - mutex_unlock(>->i915->drm.struct_mutex); i915_gem_drain_freed_objects(gt->i915); diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c 
b/drivers/gpu/drm/i915/selftests/i915_vma.c index a5bec0a4cdcc..58b5f40a07dd 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -24,6 +24,7 @@ #include <linux/prime_numbers.h> +#include "gem/i915_gem_context.h" #include "gem/selftests/mock_context.h" #include "i915_scatterlist.h" @@ -38,7 +39,7 @@ static bool assert_vma(struct i915_vma *vma, { bool ok = true; - if (vma->vm != ctx->vm) { + if (vma->vm != rcu_access_pointer(ctx->vm)) { pr_err("VMA created with wrong VM\n"); ok = false; } @@ -113,11 +114,13 @@ static int create_vmas(struct drm_i915_private *i915, list_for_each_entry(obj, objects, st_link) { for (pinned = 0; pinned <= 1; pinned++) { list_for_each_entry(ctx, contexts, link) { - struct i915_address_space *vm = ctx->vm; + struct i915_address_space *vm; struct i915_vma *vma; int err; + vm = i915_gem_context_get_vm_rcu(ctx); vma = checked_vma_instance(obj, vm, NULL); + i915_vm_put(vm); if (IS_ERR(vma)) return PTR_ERR(vma); @@ -170,7 +173,7 @@ static int igt_vma_create(void *arg) } nc = 0; - for_each_prime_number(num_ctx, MAX_CONTEXT_HW_ID) { + for_each_prime_number(num_ctx, 2 * NUM_CONTEXT_TAG) { for (; nc < num_ctx; nc++) { ctx = mock_context(i915, "mock"); if (!ctx) @@ -623,7 +626,7 @@ static bool assert_partial(struct drm_i915_gem_object *obj, struct sgt_iter sgt; dma_addr_t dma; - for_each_sgt_dma(dma, sgt, vma->pages) { + for_each_sgt_daddr(dma, sgt, vma->pages) { dma_addr_t src; if (!size) { @@ -831,13 +834,10 @@ int i915_vma_mock_selftests(void) } mock_init_ggtt(i915, ggtt); - mutex_lock(&i915->drm.struct_mutex); err = i915_subtests(tests, ggtt); - mock_device_flush(i915); - mutex_unlock(&i915->drm.struct_mutex); + mock_device_flush(i915); i915_gem_drain_freed_objects(i915); - mock_fini_ggtt(ggtt); kfree(ggtt); out_put: @@ -879,8 +879,6 @@ static int igt_vma_remapped_gtt(void *arg) if (IS_ERR(obj)) return PTR_ERR(obj); - mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(&i915->runtime_pm); for (t = types; *t; t++) { @@ -976,7 +974,6 @@ static int igt_vma_remapped_gtt(void *arg) out: intel_runtime_pm_put(&i915->runtime_pm, wakeref); - mutex_unlock(&i915->drm.struct_mutex); i915_gem_object_put(obj); return err; diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.c b/drivers/gpu/drm/i915/selftests/igt_flush_test.c index d3b5eb402d33..7b0939e3f007 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.c @@ -4,39 +4,32 @@ * Copyright © 2018 Intel Corporation */ -#include "gem/i915_gem_context.h" #include "gt/intel_gt.h" +#include "gt/intel_gt_requests.h" #include "i915_drv.h" #include "i915_selftest.h" #include "igt_flush_test.h" -int igt_flush_test(struct drm_i915_private *i915, unsigned int flags) +int igt_flush_test(struct drm_i915_private *i915) { - int ret = intel_gt_is_wedged(&i915->gt) ? -EIO : 0; - int repeat = !!(flags & I915_WAIT_LOCKED); + struct intel_gt *gt = &i915->gt; + int ret = intel_gt_is_wedged(gt) ? 
-EIO : 0; cond_resched(); - do { - if (i915_gem_wait_for_idle(i915, flags, HZ / 5) == -ETIME) { - pr_err("%pS timed out, cancelling all further testing.\n", - __builtin_return_address(0)); + if (intel_gt_wait_for_idle(gt, HZ / 5) == -ETIME) { + pr_err("%pS timed out, cancelling all further testing.\n", + __builtin_return_address(0)); - GEM_TRACE("%pS timed out.\n", - __builtin_return_address(0)); - GEM_TRACE_DUMP(); + GEM_TRACE("%pS timed out.\n", + __builtin_return_address(0)); + GEM_TRACE_DUMP(); - intel_gt_set_wedged(&i915->gt); - repeat = 0; - ret = -EIO; - } - - /* Ensure we also flush after wedging. */ - if (flags & I915_WAIT_LOCKED) - i915_retire_requests(i915); - } while (repeat--); + intel_gt_set_wedged(gt); + ret = -EIO; + } return ret; } diff --git a/drivers/gpu/drm/i915/selftests/igt_flush_test.h b/drivers/gpu/drm/i915/selftests/igt_flush_test.h index 63e009927c43..7541fa74e641 100644 --- a/drivers/gpu/drm/i915/selftests/igt_flush_test.h +++ b/drivers/gpu/drm/i915/selftests/igt_flush_test.h @@ -9,6 +9,6 @@ struct drm_i915_private; -int igt_flush_test(struct drm_i915_private *i915, unsigned int flags); +int igt_flush_test(struct drm_i915_private *i915); #endif /* IGT_FLUSH_TEST_H */ diff --git a/drivers/gpu/drm/i915/selftests/igt_live_test.c b/drivers/gpu/drm/i915/selftests/igt_live_test.c index 3e902761cd16..c130010a7033 100644 --- a/drivers/gpu/drm/i915/selftests/igt_live_test.c +++ b/drivers/gpu/drm/i915/selftests/igt_live_test.c @@ -4,7 +4,8 @@ * Copyright © 2018 Intel Corporation */ -#include "../i915_drv.h" +#include "i915_drv.h" +#include "gt/intel_gt_requests.h" #include "../i915_selftest.h" #include "igt_flush_test.h" @@ -15,20 +16,16 @@ int igt_live_test_begin(struct igt_live_test *t, const char *func, const char *name) { + struct intel_gt *gt = &i915->gt; struct intel_engine_cs *engine; enum intel_engine_id id; int err; - lockdep_assert_held(&i915->drm.struct_mutex); - t->i915 = i915; t->func = func; t->name = name; - err = i915_gem_wait_for_idle(i915, - I915_WAIT_INTERRUPTIBLE | - I915_WAIT_LOCKED, - MAX_SCHEDULE_TIMEOUT); + err = intel_gt_wait_for_idle(gt, MAX_SCHEDULE_TIMEOUT); if (err) { pr_err("%s(%s): failed to idle before, with err=%d!", func, name, err); @@ -37,7 +34,7 @@ int igt_live_test_begin(struct igt_live_test *t, t->reset_global = i915_reset_count(&i915->gpu_error); - for_each_engine(engine, i915, id) + for_each_engine(engine, gt, id) t->reset_engine[id] = i915_reset_engine_count(&i915->gpu_error, engine); @@ -50,9 +47,7 @@ int igt_live_test_end(struct igt_live_test *t) struct intel_engine_cs *engine; enum intel_engine_id id; - lockdep_assert_held(&i915->drm.struct_mutex); - - if (igt_flush_test(i915, I915_WAIT_LOCKED)) + if (igt_flush_test(i915)) return -EIO; if (t->reset_global != i915_reset_count(&i915->gpu_error)) { @@ -62,7 +57,7 @@ int igt_live_test_end(struct igt_live_test *t) return -EIO; } - for_each_engine(engine, i915, id) { + for_each_engine(engine, &i915->gt, id) { if (t->reset_engine[id] == i915_reset_engine_count(&i915->gpu_error, engine)) continue; diff --git a/drivers/gpu/drm/i915/selftests/igt_reset.c b/drivers/gpu/drm/i915/selftests/igt_reset.c index 7ec8f8b049c6..9f8590b868a9 100644 --- a/drivers/gpu/drm/i915/selftests/igt_reset.c +++ b/drivers/gpu/drm/i915/selftests/igt_reset.c @@ -22,7 +22,7 @@ void igt_global_reset_lock(struct intel_gt *gt) wait_event(gt->reset.queue, !test_bit(I915_RESET_BACKOFF, &gt->reset.flags)); - for_each_engine(engine, gt->i915, id) { + for_each_engine(engine, gt, id) { while 
(test_and_set_bit(I915_RESET_ENGINE + id, &gt->reset.flags)) wait_on_bit(&gt->reset.flags, I915_RESET_ENGINE + id, @@ -35,7 +35,7 @@ void igt_global_reset_unlock(struct intel_gt *gt) struct intel_engine_cs *engine; enum intel_engine_id id; - for_each_engine(engine, gt->i915, id) + for_each_engine(engine, gt, id) clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags); clear_bit(I915_RESET_BACKOFF, &gt->reset.flags); diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c index 11f04ad48e68..ee8450b871da 100644 --- a/drivers/gpu/drm/i915/selftests/igt_spinner.c +++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c @@ -147,7 +147,7 @@ igt_spinner_create_request(struct igt_spinner *spin, intel_gt_chipset_flush(engine->gt); if (engine->emit_init_breadcrumb && - rq->timeline->has_initial_breadcrumb) { + i915_request_timeline(rq)->has_initial_breadcrumb) { err = engine->emit_init_breadcrumb(rq); if (err) goto cancel_rq; diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c new file mode 100644 index 000000000000..19e1cca8f143 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c @@ -0,0 +1,624 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include <linux/prime_numbers.h> + +#include "../i915_selftest.h" + +#include "mock_drm.h" +#include "mock_gem_device.h" +#include "mock_region.h" + +#include "gem/i915_gem_context.h" +#include "gem/i915_gem_lmem.h" +#include "gem/i915_gem_region.h" +#include "gem/i915_gem_object_blt.h" +#include "gem/selftests/igt_gem_utils.h" +#include "gem/selftests/mock_context.h" +#include "gt/intel_engine_user.h" +#include "gt/intel_gt.h" +#include "selftests/igt_flush_test.h" +#include "selftests/i915_random.h" + +static void close_objects(struct intel_memory_region *mem, + struct list_head *objects) +{ + struct drm_i915_private *i915 = mem->i915; + struct drm_i915_gem_object *obj, *on; + + list_for_each_entry_safe(obj, on, objects, st_link) { + if (i915_gem_object_has_pinned_pages(obj)) + i915_gem_object_unpin_pages(obj); + /* No polluting the memory region between tests */ + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + list_del(&obj->st_link); + i915_gem_object_put(obj); + } + + cond_resched(); + + i915_gem_drain_freed_objects(i915); +} + +static int igt_mock_fill(void *arg) +{ + struct intel_memory_region *mem = arg; + resource_size_t total = resource_size(&mem->region); + resource_size_t page_size; + resource_size_t rem; + unsigned long max_pages; + unsigned long page_num; + LIST_HEAD(objects); + int err = 0; + + page_size = mem->mm.chunk_size; + max_pages = div64_u64(total, page_size); + rem = total; + + for_each_prime_number_from(page_num, 1, max_pages) { + resource_size_t size = page_num * page_size; + struct drm_i915_gem_object *obj; + + obj = i915_gem_object_create_region(mem, size, 0); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + break; + } + + err = i915_gem_object_pin_pages(obj); + if (err) { + i915_gem_object_put(obj); + break; + } + + list_add(&obj->st_link, &objects); + rem -= size; + } + + if (err == -ENOMEM) + err = 0; + if (err == -ENXIO) { + if (page_num * page_size <= rem) { + pr_err("%s failed, space still left in region\n", + __func__); + err = -EINVAL; + } else { + err = 0; + } + } + + close_objects(mem, &objects); + + return err; +} + +static struct drm_i915_gem_object * +igt_object_create(struct intel_memory_region *mem, + struct list_head *objects, + u64 size, + unsigned int 
flags) +{ + struct drm_i915_gem_object *obj; + int err; + + obj = i915_gem_object_create_region(mem, size, flags); + if (IS_ERR(obj)) + return obj; + + err = i915_gem_object_pin_pages(obj); + if (err) + goto put; + + list_add(&obj->st_link, objects); + return obj; + +put: + i915_gem_object_put(obj); + return ERR_PTR(err); +} + +static void igt_object_release(struct drm_i915_gem_object *obj) +{ + i915_gem_object_unpin_pages(obj); + __i915_gem_object_put_pages(obj, I915_MM_NORMAL); + list_del(&obj->st_link); + i915_gem_object_put(obj); +} + +static int igt_mock_contiguous(void *arg) +{ + struct intel_memory_region *mem = arg; + struct drm_i915_gem_object *obj; + unsigned long n_objects; + LIST_HEAD(objects); + LIST_HEAD(holes); + I915_RND_STATE(prng); + resource_size_t total; + resource_size_t min; + u64 target; + int err = 0; + + total = resource_size(&mem->region); + + /* Min size */ + obj = igt_object_create(mem, &objects, mem->mm.chunk_size, + I915_BO_ALLOC_CONTIGUOUS); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + if (obj->mm.pages->nents != 1) { + pr_err("%s min object spans multiple sg entries\n", __func__); + err = -EINVAL; + goto err_close_objects; + } + + igt_object_release(obj); + + /* Max size */ + obj = igt_object_create(mem, &objects, total, I915_BO_ALLOC_CONTIGUOUS); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + if (obj->mm.pages->nents != 1) { + pr_err("%s max object spans multiple sg entries\n", __func__); + err = -EINVAL; + goto err_close_objects; + } + + igt_object_release(obj); + + /* Internal fragmentation should not bleed into the object size */ + target = i915_prandom_u64_state(&prng); + div64_u64_rem(target, total, &target); + target = round_up(target, PAGE_SIZE); + target = max_t(u64, PAGE_SIZE, target); + + obj = igt_object_create(mem, &objects, target, + I915_BO_ALLOC_CONTIGUOUS); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + if (obj->base.size != target) { + pr_err("%s obj->base.size(%zx) != target(%llx)\n", __func__, + obj->base.size, target); + err = -EINVAL; + goto err_close_objects; + } + + if (obj->mm.pages->nents != 1) { + pr_err("%s object spans multiple sg entries\n", __func__); + err = -EINVAL; + goto err_close_objects; + } + + igt_object_release(obj); + + /* + * Try to fragment the address space, such that half of it is free, but + * the max contiguous block size is SZ_64K. + */ + + target = SZ_64K; + n_objects = div64_u64(total, target); + + while (n_objects--) { + struct list_head *list; + + if (n_objects % 2) + list = &holes; + else + list = &objects; + + obj = igt_object_create(mem, list, target, + I915_BO_ALLOC_CONTIGUOUS); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_close_objects; + } + } + + close_objects(mem, &holes); + + min = target; + target = total >> 1; + + /* Make sure we can still allocate all the fragmented space */ + obj = igt_object_create(mem, &objects, target, 0); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto err_close_objects; + } + + igt_object_release(obj); + + /* + * Even though we have enough free space, we don't have a big enough + * contiguous block. Make sure that holds true. 
+ */ + + do { + bool should_fail = target > min; + + obj = igt_object_create(mem, &objects, target, + I915_BO_ALLOC_CONTIGUOUS); + if (should_fail != IS_ERR(obj)) { + pr_err("%s target allocation(%llx) mismatch\n", + __func__, target); + err = -EINVAL; + goto err_close_objects; + } + + target >>= 1; + } while (target >= mem->mm.chunk_size); + +err_close_objects: + list_splice_tail(&holes, &objects); + close_objects(mem, &objects); + return err; +} + +static int igt_gpu_write_dw(struct intel_context *ce, + struct i915_vma *vma, + u32 dword, + u32 value) +{ + return igt_gpu_fill_dw(ce, vma, dword * sizeof(u32), + vma->size >> PAGE_SHIFT, value); +} + +static int igt_cpu_check(struct drm_i915_gem_object *obj, u32 dword, u32 val) +{ + unsigned long n; + int err; + + i915_gem_object_lock(obj); + err = i915_gem_object_set_to_wc_domain(obj, false); + i915_gem_object_unlock(obj); + if (err) + return err; + + err = i915_gem_object_pin_pages(obj); + if (err) + return err; + + for (n = 0; n < obj->base.size >> PAGE_SHIFT; ++n) { + u32 __iomem *base; + u32 read_val; + + base = i915_gem_object_lmem_io_map_page_atomic(obj, n); + + read_val = ioread32(base + dword); + io_mapping_unmap_atomic(base); + if (read_val != val) { + pr_err("n=%lu base[%u]=%u, val=%u\n", + n, dword, read_val, val); + err = -EINVAL; + break; + } + } + + i915_gem_object_unpin_pages(obj); + return err; +} + +static int igt_gpu_write(struct i915_gem_context *ctx, + struct drm_i915_gem_object *obj) +{ + struct i915_gem_engines *engines; + struct i915_gem_engines_iter it; + struct i915_address_space *vm; + struct intel_context *ce; + I915_RND_STATE(prng); + IGT_TIMEOUT(end_time); + unsigned int count; + struct i915_vma *vma; + int *order; + int i, n; + int err = 0; + + GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj)); + + n = 0; + count = 0; + for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) { + count++; + if (!intel_engine_can_store_dword(ce->engine)) + continue; + + vm = ce->vm; + n++; + } + i915_gem_context_unlock_engines(ctx); + if (!n) + return 0; + + order = i915_random_order(count * count, &prng); + if (!order) + return -ENOMEM; + + vma = i915_vma_instance(obj, vm, NULL); + if (IS_ERR(vma)) { + err = PTR_ERR(vma); + goto out_free; + } + + err = i915_vma_pin(vma, 0, 0, PIN_USER); + if (err) + goto out_free; + + i = 0; + engines = i915_gem_context_lock_engines(ctx); + do { + u32 rng = prandom_u32_state(&prng); + u32 dword = offset_in_page(rng) / 4; + + ce = engines->engines[order[i] % engines->num_engines]; + i = (i + 1) % (count * count); + if (!ce || !intel_engine_can_store_dword(ce->engine)) + continue; + + err = igt_gpu_write_dw(ce, vma, dword, rng); + if (err) + break; + + err = igt_cpu_check(obj, dword, rng); + if (err) + break; + } while (!__igt_timeout(end_time, NULL)); + i915_gem_context_unlock_engines(ctx); + +out_free: + kfree(order); + + if (err == -ENOMEM) + err = 0; + + return err; +} + +static int igt_lmem_create(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + int err = 0; + + obj = i915_gem_object_create_lmem(i915, PAGE_SIZE, 0); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + err = i915_gem_object_pin_pages(obj); + if (err) + goto out_put; + + i915_gem_object_unpin_pages(obj); +out_put: + i915_gem_object_put(obj); + + return err; +} + +static int igt_lmem_write_gpu(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + struct i915_gem_context *ctx; + struct drm_file *file; + I915_RND_STATE(prng); + u32 sz; + int err; + + 
file = mock_file(i915); + if (IS_ERR(file)) + return PTR_ERR(file); + + ctx = live_context(i915, file); + if (IS_ERR(ctx)) { + err = PTR_ERR(ctx); + goto out_file; + } + + sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE); + + obj = i915_gem_object_create_lmem(i915, sz, 0); + if (IS_ERR(obj)) { + err = PTR_ERR(obj); + goto out_file; + } + + err = i915_gem_object_pin_pages(obj); + if (err) + goto out_put; + + err = igt_gpu_write(ctx, obj); + if (err) + pr_err("igt_gpu_write failed(%d)\n", err); + + i915_gem_object_unpin_pages(obj); +out_put: + i915_gem_object_put(obj); +out_file: + mock_file_free(i915, file); + return err; +} + +static struct intel_engine_cs * +random_engine_class(struct drm_i915_private *i915, + unsigned int class, + struct rnd_state *prng) +{ + struct intel_engine_cs *engine; + unsigned int count; + + count = 0; + for (engine = intel_engine_lookup_user(i915, class, 0); + engine && engine->uabi_class == class; + engine = rb_entry_safe(rb_next(&engine->uabi_node), + typeof(*engine), uabi_node)) + count++; + + count = i915_prandom_u32_max_state(count, prng); + return intel_engine_lookup_user(i915, class, count); +} + +static int igt_lmem_write_cpu(void *arg) +{ + struct drm_i915_private *i915 = arg; + struct drm_i915_gem_object *obj; + I915_RND_STATE(prng); + IGT_TIMEOUT(end_time); + u32 bytes[] = { + 0, /* rng placeholder */ + sizeof(u32), + sizeof(u64), + 64, /* cl */ + PAGE_SIZE, + PAGE_SIZE - sizeof(u32), + PAGE_SIZE - sizeof(u64), + PAGE_SIZE - 64, + }; + struct intel_engine_cs *engine; + u32 *vaddr; + u32 sz; + u32 i; + int *order; + int count; + int err; + + engine = random_engine_class(i915, I915_ENGINE_CLASS_COPY, &prng); + if (!engine) + return 0; + + pr_info("%s: using %s\n", __func__, engine->name); + + sz = round_up(prandom_u32_state(&prng) % SZ_32M, PAGE_SIZE); + sz = max_t(u32, 2 * PAGE_SIZE, sz); + + obj = i915_gem_object_create_lmem(i915, sz, I915_BO_ALLOC_CONTIGUOUS); + if (IS_ERR(obj)) + return PTR_ERR(obj); + + vaddr = i915_gem_object_pin_map(obj, I915_MAP_WC); + if (IS_ERR(vaddr)) { + err = PTR_ERR(vaddr); + goto out_put; + } + + /* Put the pages into a known state -- from the gpu for added fun */ + err = i915_gem_object_fill_blt(obj, engine->kernel_context, 0xdeadbeaf); + if (err) + goto out_unpin; + + i915_gem_object_lock(obj); + err = i915_gem_object_set_to_wc_domain(obj, true); + i915_gem_object_unlock(obj); + if (err) + goto out_unpin; + + count = ARRAY_SIZE(bytes); + order = i915_random_order(count * count, &prng); + if (!order) { + err = -ENOMEM; + goto out_unpin; + } + + /* We want to throw in a random width/align */ + bytes[0] = igt_random_offset(&prng, 0, PAGE_SIZE, sizeof(u32), + sizeof(u32)); + + i = 0; + do { + u32 offset; + u32 align; + u32 dword; + u32 size; + u32 val; + + size = bytes[order[i] % count]; + i = (i + 1) % (count * count); + + align = bytes[order[i] % count]; + i = (i + 1) % (count * count); + + align = max_t(u32, sizeof(u32), rounddown_pow_of_two(align)); + + offset = igt_random_offset(&prng, 0, obj->base.size, + size, align); + + val = prandom_u32_state(&prng); + memset32(vaddr + offset / sizeof(u32), val ^ 0xdeadbeaf, + size / sizeof(u32)); + + /* + * Sample random dw -- don't waste precious time reading every + * single dw. 
+ */ + dword = igt_random_offset(&prng, offset, + offset + size, + sizeof(u32), sizeof(u32)); + dword /= sizeof(u32); + if (vaddr[dword] != (val ^ 0xdeadbeaf)) { + pr_err("%s vaddr[%u]=%u, val=%u, size=%u, align=%u, offset=%u\n", + __func__, dword, vaddr[dword], val ^ 0xdeadbeaf, + size, align, offset); + err = -EINVAL; + break; + } + } while (!__igt_timeout(end_time, NULL)); + +out_unpin: + i915_gem_object_unpin_map(obj); +out_put: + i915_gem_object_put(obj); + + return err; +} + +int intel_memory_region_mock_selftests(void) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_mock_fill), + SUBTEST(igt_mock_contiguous), + }; + struct intel_memory_region *mem; + struct drm_i915_private *i915; + int err; + + i915 = mock_gem_device(); + if (!i915) + return -ENOMEM; + + mem = mock_region_create(i915, 0, SZ_2G, I915_GTT_PAGE_SIZE_4K, 0); + if (IS_ERR(mem)) { + pr_err("failed to create memory region\n"); + err = PTR_ERR(mem); + goto out_unref; + } + + err = i915_subtests(tests, mem); + + intel_memory_region_put(mem); +out_unref: + drm_dev_put(&i915->drm); + return err; +} + +int intel_memory_region_live_selftests(struct drm_i915_private *i915) +{ + static const struct i915_subtest tests[] = { + SUBTEST(igt_lmem_create), + SUBTEST(igt_lmem_write_cpu), + SUBTEST(igt_lmem_write_gpu), + }; + + if (!HAS_LMEM(i915)) { + pr_info("device lacks LMEM support, skipping\n"); + return 0; + } + + if (intel_gt_is_wedged(&i915->gt)) + return 0; + + return i915_live_subtests(tests, i915); +} diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 86815c6072a1..0e4e6be0101d 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -67,6 +67,7 @@ static int intel_shadow_table_check(void) } reg_lists[] = { { gen8_shadowed_regs, ARRAY_SIZE(gen8_shadowed_regs) }, { gen11_shadowed_regs, ARRAY_SIZE(gen11_shadowed_regs) }, + { gen12_shadowed_regs, ARRAY_SIZE(gen12_shadowed_regs) }, }; const i915_reg_t *reg; unsigned int i, j; @@ -101,6 +102,7 @@ int intel_uncore_mock_selftests(void) { __chv_fw_ranges, ARRAY_SIZE(__chv_fw_ranges), false }, { __gen9_fw_ranges, ARRAY_SIZE(__gen9_fw_ranges), true }, { __gen11_fw_ranges, ARRAY_SIZE(__gen11_fw_ranges), true }, + { __gen12_fw_ranges, ARRAY_SIZE(__gen12_fw_ranges), true }, }; int err, i; @@ -138,19 +140,19 @@ static int live_forcewake_ops(void *arg) } }; const struct reg *r; - struct drm_i915_private *i915 = arg; + struct intel_gt *gt = arg; struct intel_uncore_forcewake_domain *domain; - struct intel_uncore *uncore = &i915->uncore; + struct intel_uncore *uncore = gt->uncore; struct intel_engine_cs *engine; enum intel_engine_id id; intel_wakeref_t wakeref; unsigned int tmp; int err = 0; - GEM_BUG_ON(i915->gt.awake); + GEM_BUG_ON(gt->awake); /* vlv/chv with their pcu behave differently wrt reads */ - if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) { + if (IS_VALLEYVIEW(gt->i915) || IS_CHERRYVIEW(gt->i915)) { pr_debug("PCU fakes forcewake badly; skipping\n"); return 0; } @@ -168,15 +170,15 @@ static int live_forcewake_ops(void *arg) /* We have to pick carefully to get the exact behaviour we need */ for (r = registers; r->name; r++) - if (r->platforms & INTEL_INFO(i915)->gen_mask) + if (r->platforms & INTEL_INFO(gt->i915)->gen_mask) break; if (!r->name) { pr_debug("Forcewaked register not known for %s; skipping\n", - intel_platform_name(INTEL_INFO(i915)->platform)); + intel_platform_name(INTEL_INFO(gt->i915)->platform)); return 0; } - wakeref = 
intel_runtime_pm_get(&i915->runtime_pm); + wakeref = intel_runtime_pm_get(uncore->rpm); for_each_fw_domain(domain, uncore, tmp) { smp_store_mb(domain->active, false); @@ -186,7 +188,7 @@ static int live_forcewake_ops(void *arg) intel_uncore_fw_release_timer(&domain->timer); } - for_each_engine(engine, i915, id) { + for_each_engine(engine, gt, id) { i915_reg_t mmio = _MMIO(engine->mmio_base + r->offset); u32 __iomem *reg = uncore->regs + engine->mmio_base + r->offset; enum forcewake_domains fw_domains; @@ -247,22 +249,22 @@ static int live_forcewake_ops(void *arg) } out_rpm: - intel_runtime_pm_put(&i915->runtime_pm, wakeref); + intel_runtime_pm_put(uncore->rpm, wakeref); return err; } static int live_forcewake_domains(void *arg) { #define FW_RANGE 0x40000 - struct drm_i915_private *dev_priv = arg; - struct intel_uncore *uncore = &dev_priv->uncore; + struct intel_gt *gt = arg; + struct intel_uncore *uncore = gt->uncore; unsigned long *valid; u32 offset; int err; - if (!HAS_FPGA_DBG_UNCLAIMED(dev_priv) && - !IS_VALLEYVIEW(dev_priv) && - !IS_CHERRYVIEW(dev_priv)) + if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) && + !IS_VALLEYVIEW(gt->i915) && + !IS_CHERRYVIEW(gt->i915)) return 0; /* @@ -281,7 +283,7 @@ static int live_forcewake_domains(void *arg) for (offset = 0; offset < FW_RANGE; offset += 4) { i915_reg_t reg = { offset }; - (void)I915_READ_FW(reg); + intel_uncore_posting_read_fw(uncore, reg); if (!check_for_unclaimed_mmio(uncore)) set_bit(offset, valid); } @@ -298,7 +300,7 @@ static int live_forcewake_domains(void *arg) check_for_unclaimed_mmio(uncore); - (void)I915_READ(reg); + intel_uncore_posting_read_fw(uncore, reg); if (check_for_unclaimed_mmio(uncore)) { pr_err("Unclaimed mmio read to register 0x%04x\n", offset); @@ -310,21 +312,23 @@ static int live_forcewake_domains(void *arg) return err; } +static int live_fw_table(void *arg) +{ + struct intel_gt *gt = arg; + + /* Confirm the table we load is still valid */ + return intel_fw_table_check(gt->uncore->fw_domains_table, + gt->uncore->fw_domains_table_entries, + INTEL_GEN(gt->i915) >= 9); +} + int intel_uncore_live_selftests(struct drm_i915_private *i915) { static const struct i915_subtest tests[] = { + SUBTEST(live_fw_table), SUBTEST(live_forcewake_ops), SUBTEST(live_forcewake_domains), }; - int err; - - /* Confirm the table we load is still valid */ - err = intel_fw_table_check(i915->uncore.fw_domains_table, - i915->uncore.fw_domains_table_entries, - INTEL_GEN(i915) >= 9); - if (err) - return err; - - return i915_subtests(tests, i915); + return intel_gt_live_subtests(tests, &i915->gt); } diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index 01a89c071bf5..27ed3cee6a9b 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -26,27 +26,29 @@ #include <linux/pm_runtime.h> #include "gt/intel_gt.h" +#include "gt/intel_gt_requests.h" #include "gt/mock_engine.h" +#include "intel_memory_region.h" #include "mock_request.h" #include "mock_gem_device.h" #include "mock_gtt.h" #include "mock_uncore.h" +#include "mock_region.h" #include "gem/selftests/mock_context.h" #include "gem/selftests/mock_gem_object.h" void mock_device_flush(struct drm_i915_private *i915) { + struct intel_gt *gt = &i915->gt; struct intel_engine_cs *engine; enum intel_engine_id id; - lockdep_assert_held(&i915->drm.struct_mutex); - do { - for_each_engine(engine, i915, id) + for_each_engine(engine, gt, id) mock_engine_flush(engine); - } while 
(i915_retire_requests(i915)); + } while (intel_gt_retire_requests_timeout(gt, MAX_SCHEDULE_TIMEOUT)); } static void mock_device_release(struct drm_device *dev) @@ -55,31 +57,23 @@ static void mock_device_release(struct drm_device *dev) struct intel_engine_cs *engine; enum intel_engine_id id; - mutex_lock(&i915->drm.struct_mutex); mock_device_flush(i915); - mutex_unlock(&i915->drm.struct_mutex); - flush_work(&i915->gem.idle_work); i915_gem_drain_workqueue(i915); - mutex_lock(&i915->drm.struct_mutex); - for_each_engine(engine, i915, id) + for_each_engine(engine, &i915->gt, id) mock_engine_free(engine); - i915_gem_contexts_fini(i915); - mutex_unlock(&i915->drm.struct_mutex); + i915_gem_driver_release__contexts(i915); intel_timelines_fini(i915); drain_workqueue(i915->wq); i915_gem_drain_freed_objects(i915); - mutex_lock(&i915->drm.struct_mutex); mock_fini_ggtt(&i915->ggtt); - mutex_unlock(&i915->drm.struct_mutex); - destroy_workqueue(i915->wq); - i915_gemfs_fini(i915); + intel_memory_regions_driver_release(i915); drm_mode_config_cleanup(&i915->drm); @@ -103,14 +97,6 @@ static void release_dev(struct device *dev) kfree(pdev); } -static void mock_retire_work_handler(struct work_struct *work) -{ -} - -static void mock_idle_work_handler(struct work_struct *work) -{ -} - static int pm_domain_resume(struct device *dev) { return pm_generic_runtime_resume(dev); @@ -178,10 +164,15 @@ struct drm_i915_private *mock_gem_device(void) I915_GTT_PAGE_SIZE_64K | I915_GTT_PAGE_SIZE_2M; - mock_uncore_init(&i915->uncore); + mkwrite_device_info(i915)->memory_regions = REGION_SMEM; + intel_memory_regions_hw_probe(i915); + + mock_uncore_init(&i915->uncore, i915); + i915_gem_init__mm(i915); intel_gt_init_early(&i915->gt, i915); atomic_inc(&i915->gt.wakeref.count); /* disable; no hw support */ + i915->gt.awake = -ENODEV; i915->wq = alloc_ordered_workqueue("mock", 0); if (!i915->wq) @@ -189,15 +180,8 @@ struct drm_i915_private *mock_gem_device(void) mock_init_contexts(i915); - INIT_DELAYED_WORK(&i915->gem.retire_work, mock_retire_work_handler); - INIT_WORK(&i915->gem.idle_work, mock_idle_work_handler); - - i915->gt.awake = true; - intel_timelines_init(i915); - mutex_lock(&i915->drm.struct_mutex); - mock_init_ggtt(i915, &i915->ggtt); mkwrite_device_info(i915)->engine_mask = BIT(0); @@ -214,21 +198,18 @@ struct drm_i915_private *mock_gem_device(void) goto err_context; intel_engines_driver_register(i915); - mutex_unlock(&i915->drm.struct_mutex); - - WARN_ON(i915_gemfs_init(i915)); return i915; err_context: - i915_gem_contexts_fini(i915); + i915_gem_driver_release__contexts(i915); err_engine: mock_engine_free(i915->engine[RCS0]); err_unlock: - mutex_unlock(&i915->drm.struct_mutex); intel_timelines_fini(i915); destroy_workqueue(i915->wq); err_drv: + intel_memory_regions_driver_release(i915); drm_mode_config_cleanup(&i915->drm); drm_dev_fini(&i915->drm); put_device: diff --git a/drivers/gpu/drm/i915/selftests/mock_gtt.c b/drivers/gpu/drm/i915/selftests/mock_gtt.c index e62a67e0f79c..20ac3844edec 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gtt.c +++ b/drivers/gpu/drm/i915/selftests/mock_gtt.c @@ -43,7 +43,7 @@ static int mock_bind_ppgtt(struct i915_vma *vma, u32 flags) { GEM_BUG_ON(flags & I915_VMA_GLOBAL_BIND); - vma->flags |= I915_VMA_LOCAL_BIND; + set_bit(I915_VMA_LOCAL_BIND_BIT, __i915_vma_flags(vma)); return 0; } @@ -63,6 +63,7 @@ struct i915_ppgtt *mock_ppgtt(struct drm_i915_private *i915, const char *name) if (!ppgtt) return NULL; + ppgtt->vm.gt = &i915->gt; ppgtt->vm.i915 = i915; ppgtt->vm.total = 
round_down(U64_MAX, PAGE_SIZE); ppgtt->vm.file = ERR_PTR(-ENODEV); @@ -86,7 +87,7 @@ static int mock_bind_ggtt(struct i915_vma *vma, enum i915_cache_level cache_level, u32 flags) { - vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; + atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags); return 0; } @@ -117,8 +118,7 @@ void mock_init_ggtt(struct drm_i915_private *i915, struct i915_ggtt *ggtt) ggtt->vm.vma_ops.clear_pages = clear_pages; i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT); - - intel_gt_init_hw(i915); + i915->gt.ggtt = ggtt; } void mock_fini_ggtt(struct i915_ggtt *ggtt) diff --git a/drivers/gpu/drm/i915/selftests/mock_region.c b/drivers/gpu/drm/i915/selftests/mock_region.c new file mode 100644 index 000000000000..b2ad41c27e67 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/mock_region.c @@ -0,0 +1,60 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2019 Intel Corporation + */ + +#include "gem/i915_gem_region.h" +#include "intel_memory_region.h" + +#include "mock_region.h" + +static const struct drm_i915_gem_object_ops mock_region_obj_ops = { + .get_pages = i915_gem_object_get_pages_buddy, + .put_pages = i915_gem_object_put_pages_buddy, + .release = i915_gem_object_release_memory_region, +}; + +static struct drm_i915_gem_object * +mock_object_create(struct intel_memory_region *mem, + resource_size_t size, + unsigned int flags) +{ + static struct lock_class_key lock_class; + struct drm_i915_private *i915 = mem->i915; + struct drm_i915_gem_object *obj; + + if (size > BIT(mem->mm.max_order) * mem->mm.chunk_size) + return ERR_PTR(-E2BIG); + + obj = i915_gem_object_alloc(); + if (!obj) + return ERR_PTR(-ENOMEM); + + drm_gem_private_object_init(&i915->drm, &obj->base, size); + i915_gem_object_init(obj, &mock_region_obj_ops, &lock_class); + + obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; + + i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE); + + i915_gem_object_init_memory_region(obj, mem, flags); + + return obj; +} + +static const struct intel_memory_region_ops mock_region_ops = { + .init = intel_memory_region_init_buddy, + .release = intel_memory_region_release_buddy, + .create_object = mock_object_create, +}; + +struct intel_memory_region * +mock_region_create(struct drm_i915_private *i915, + resource_size_t start, + resource_size_t size, + resource_size_t min_page_size, + resource_size_t io_start) +{ + return intel_memory_region_create(i915, start, size, min_page_size, + io_start, &mock_region_ops); +} diff --git a/drivers/gpu/drm/i915/selftests/mock_region.h b/drivers/gpu/drm/i915/selftests/mock_region.h new file mode 100644 index 000000000000..24608089d833 --- /dev/null +++ b/drivers/gpu/drm/i915/selftests/mock_region.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2019 Intel Corporation + */ + +#ifndef __MOCK_REGION_H +#define __MOCK_REGION_H + +struct intel_memory_region * +mock_region_create(struct drm_i915_private *i915, + resource_size_t start, + resource_size_t size, + resource_size_t min_page_size, + resource_size_t io_start); + +#endif /* !__MOCK_REGION_H */ diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.c b/drivers/gpu/drm/i915/selftests/mock_uncore.c index 49585f16d4a2..ca57e4008701 100644 --- a/drivers/gpu/drm/i915/selftests/mock_uncore.c +++ b/drivers/gpu/drm/i915/selftests/mock_uncore.c @@ -39,8 +39,11 @@ __nop_read(16) __nop_read(32) __nop_read(64) -void mock_uncore_init(struct intel_uncore *uncore) +void mock_uncore_init(struct intel_uncore *uncore, + struct 
drm_i915_private *i915) { + intel_uncore_init_early(uncore, i915); + ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop); ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop); } diff --git a/drivers/gpu/drm/i915/selftests/mock_uncore.h b/drivers/gpu/drm/i915/selftests/mock_uncore.h index dacb36b5ffcd..8a2cc553f466 100644 --- a/drivers/gpu/drm/i915/selftests/mock_uncore.h +++ b/drivers/gpu/drm/i915/selftests/mock_uncore.h @@ -25,6 +25,7 @@ #ifndef __MOCK_UNCORE_H #define __MOCK_UNCORE_H -void mock_uncore_init(struct intel_uncore *uncore); +void mock_uncore_init(struct intel_uncore *uncore, + struct drm_i915_private *i915); #endif /* !__MOCK_UNCORE_H */ diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c index 695f307f36b2..208069faf183 100644 --- a/drivers/gpu/drm/imx/imx-ldb.c +++ b/drivers/gpu/drm/imx/imx-ldb.c @@ -20,6 +20,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_fb_helper.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index e7ce17503ae1..35518e5de356 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -13,6 +13,7 @@ #include <video/of_display_timing.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_fb_helper.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c index 2e2ed653e9c6..ec32e1c67335 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm.c @@ -13,6 +13,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> @@ -676,8 +677,8 @@ static int ingenic_drm_probe(struct platform_device *pdev) } if (panel) - bridge = devm_drm_panel_bridge_add(dev, panel, - DRM_MODE_CONNECTOR_DPI); + bridge = devm_drm_panel_bridge_add_typed(dev, panel, + DRM_MODE_CONNECTOR_DPI); priv->dma_hwdesc = dma_alloc_coherent(dev, sizeof(*priv->dma_hwdesc), &priv->dma_hwdesc_phys, diff --git a/drivers/gpu/drm/lima/Kconfig b/drivers/gpu/drm/lima/Kconfig index bb4ddc6bb0a6..571dc369a7e9 100644 --- a/drivers/gpu/drm/lima/Kconfig +++ b/drivers/gpu/drm/lima/Kconfig @@ -9,5 +9,6 @@ config DRM_LIMA depends on COMMON_CLK depends on OF select DRM_SCHED + select DRM_GEM_SHMEM_HELPER help DRM driver for ARM Mali 400/450 GPUs. 
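The lima hunks that follow (Makefile through lima_vm.c) drop the driver's hand-rolled buffer-object, fault/mmap and PRIME code in favour of the generic shmem GEM helpers selected above. For orientation only — this sketch is not part of the patch and the foo_* names are invented — a driver converted to drm_gem_shmem_helper typically ends up wiring its GEM objects roughly as below; the concrete lima wiring is in the lima_gem.c hunks further down.

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

#include <drm/drm_device.h>
#include <drm/drm_gem_shmem_helper.h>

/* The driver BO embeds the shmem object; driver-private state follows it. */
struct foo_bo {
	struct drm_gem_shmem_object base;
	struct mutex lock;
	struct list_head va;
};

/* Standard GEM operations are forwarded straight to the shmem helpers. */
static const struct drm_gem_object_funcs foo_gem_funcs = {
	.free		= drm_gem_shmem_free_object,
	.print_info	= drm_gem_shmem_print_info,
	.pin		= drm_gem_shmem_pin,
	.unpin		= drm_gem_shmem_unpin,
	.get_sg_table	= drm_gem_shmem_get_sg_table,
	.vmap		= drm_gem_shmem_vmap,
	.vunmap		= drm_gem_shmem_vunmap,
	.mmap		= drm_gem_shmem_mmap,
};

/* drm_driver.gem_create_object hook: allocate the wrapper and set its funcs. */
static struct drm_gem_object *foo_gem_create_object(struct drm_device *dev,
						    size_t size)
{
	struct foo_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return NULL;

	mutex_init(&bo->lock);
	INIT_LIST_HEAD(&bo->va);
	bo->base.base.funcs = &foo_gem_funcs;

	return &bo->base.base;
}

The driver-level half of the same conversion appears in the lima_drv.c hunk below, where the custom file_operations and PRIME callbacks give way to DEFINE_DRM_GEM_FOPS(), drm_gem_shmem_prime_import_sg_table() and drm_gem_prime_mmap().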
diff --git a/drivers/gpu/drm/lima/Makefile b/drivers/gpu/drm/lima/Makefile index 38cc70281ba5..a85444b0a1d4 100644 --- a/drivers/gpu/drm/lima/Makefile +++ b/drivers/gpu/drm/lima/Makefile @@ -13,9 +13,7 @@ lima-y := \ lima_vm.o \ lima_sched.o \ lima_ctx.o \ - lima_gem_prime.o \ lima_dlbu.o \ - lima_bcast.o \ - lima_object.o + lima_bcast.o obj-$(CONFIG_DRM_LIMA) += lima.o diff --git a/drivers/gpu/drm/lima/lima_device.c b/drivers/gpu/drm/lima/lima_device.c index d86b8d81a483..19829b543024 100644 --- a/drivers/gpu/drm/lima/lima_device.c +++ b/drivers/gpu/drm/lima/lima_device.c @@ -105,7 +105,8 @@ static int lima_clk_init(struct lima_device *dev) if (err) goto error_out0; - dev->reset = devm_reset_control_get_optional(dev->dev, NULL); + dev->reset = devm_reset_control_array_get_optional_shared(dev->dev); + if (IS_ERR(dev->reset)) { err = PTR_ERR(dev->reset); if (err != -EPROBE_DEFER) @@ -313,7 +314,7 @@ int lima_device_init(struct lima_device *ldev) ldev->va_end = LIMA_VA_RESERVE_START; ldev->dlbu_cpu = dma_alloc_wc( ldev->dev, LIMA_PAGE_SIZE, - &ldev->dlbu_dma, GFP_KERNEL); + &ldev->dlbu_dma, GFP_KERNEL | __GFP_NOWARN); if (!ldev->dlbu_cpu) { err = -ENOMEM; goto err_out2; diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c index 75ec703d22e0..124efe4fa97b 100644 --- a/drivers/gpu/drm/lima/lima_drv.c +++ b/drivers/gpu/drm/lima/lima_drv.c @@ -12,7 +12,6 @@ #include "lima_drv.h" #include "lima_gem.h" -#include "lima_gem_prime.h" #include "lima_vm.h" int lima_sched_timeout_ms; @@ -240,16 +239,7 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = { DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_RENDER_ALLOW), }; -static const struct file_operations lima_drm_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, -#ifdef CONFIG_COMPAT - .compat_ioctl = drm_compat_ioctl, -#endif - .mmap = lima_gem_mmap, -}; +DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops); static struct drm_driver lima_drm_driver = { .driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ, @@ -258,10 +248,6 @@ static struct drm_driver lima_drm_driver = { .ioctls = lima_drm_driver_ioctls, .num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls), .fops = &lima_drm_driver_fops, - .gem_free_object_unlocked = lima_gem_free_object, - .gem_open_object = lima_gem_object_open, - .gem_close_object = lima_gem_object_close, - .gem_vm_ops = &lima_gem_vm_ops, .name = "lima", .desc = "lima DRM", .date = "20190217", @@ -269,11 +255,11 @@ static struct drm_driver lima_drm_driver = { .minor = 0, .patchlevel = 0, + .gem_create_object = lima_gem_create_object, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_import_sg_table = lima_gem_prime_import_sg_table, + .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, - .gem_prime_get_sg_table = lima_gem_prime_get_sg_table, - .gem_prime_mmap = lima_gem_prime_mmap, + .gem_prime_mmap = drm_gem_prime_mmap, }; static int lima_pdev_probe(struct platform_device *pdev) diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 4da21353c3a2..d0059d8c97d8 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -3,7 +3,7 @@ #include <linux/mm.h> #include <linux/sync_file.h> -#include <linux/pfn_t.h> +#include <linux/pagemap.h> #include <drm/drm_file.h> #include <drm/drm_syncobj.h> @@ -13,40 +13,55 @@ #include "lima_drv.h" #include "lima_gem.h" -#include "lima_gem_prime.h" #include 
"lima_vm.h" -#include "lima_object.h" int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file, u32 size, u32 flags, u32 *handle) { int err; - struct lima_bo *bo; - struct lima_device *ldev = to_lima_dev(dev); + gfp_t mask; + struct drm_gem_shmem_object *shmem; + struct drm_gem_object *obj; + struct sg_table *sgt; + + shmem = drm_gem_shmem_create(dev, size); + if (IS_ERR(shmem)) + return PTR_ERR(shmem); + + obj = &shmem->base; - bo = lima_bo_create(ldev, size, flags, NULL); - if (IS_ERR(bo)) - return PTR_ERR(bo); + /* Mali Utgard GPU can only support 32bit address space */ + mask = mapping_gfp_mask(obj->filp->f_mapping); + mask &= ~__GFP_HIGHMEM; + mask |= __GFP_DMA32; + mapping_set_gfp_mask(obj->filp->f_mapping, mask); - err = drm_gem_handle_create(file, &bo->gem, handle); + sgt = drm_gem_shmem_get_pages_sgt(obj); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto out; + } + + err = drm_gem_handle_create(file, obj, handle); +out: /* drop reference from allocate - handle holds it now */ - drm_gem_object_put_unlocked(&bo->gem); + drm_gem_object_put_unlocked(obj); return err; } -void lima_gem_free_object(struct drm_gem_object *obj) +static void lima_gem_free_object(struct drm_gem_object *obj) { struct lima_bo *bo = to_lima_bo(obj); if (!list_empty(&bo->va)) dev_err(obj->dev->dev, "lima gem free bo still has va\n"); - lima_bo_destroy(bo); + drm_gem_shmem_free_object(obj); } -int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file) +static int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file) { struct lima_bo *bo = to_lima_bo(obj); struct lima_drm_priv *priv = to_lima_drm_priv(file); @@ -55,7 +70,7 @@ int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file) return lima_vm_bo_add(vm, bo, true); } -void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file) +static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file) { struct lima_bo *bo = to_lima_bo(obj); struct lima_drm_priv *priv = to_lima_drm_priv(file); @@ -64,13 +79,41 @@ void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file) lima_vm_bo_del(vm, bo); } +static const struct drm_gem_object_funcs lima_gem_funcs = { + .free = lima_gem_free_object, + .open = lima_gem_object_open, + .close = lima_gem_object_close, + .print_info = drm_gem_shmem_print_info, + .pin = drm_gem_shmem_pin, + .unpin = drm_gem_shmem_unpin, + .get_sg_table = drm_gem_shmem_get_sg_table, + .vmap = drm_gem_shmem_vmap, + .vunmap = drm_gem_shmem_vunmap, + .mmap = drm_gem_shmem_mmap, +}; + +struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size) +{ + struct lima_bo *bo; + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return NULL; + + mutex_init(&bo->lock); + INIT_LIST_HEAD(&bo->va); + + bo->base.base.funcs = &lima_gem_funcs; + + return &bo->base.base; +} + int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset) { struct drm_gem_object *obj; struct lima_bo *bo; struct lima_drm_priv *priv = to_lima_drm_priv(file); struct lima_vm *vm = priv->vm; - int err; obj = drm_gem_object_lookup(file, handle); if (!obj) @@ -80,53 +123,9 @@ int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset) *va = lima_vm_get_va(vm, bo); - err = drm_gem_create_mmap_offset(obj); - if (!err) - *offset = drm_vma_node_offset_addr(&obj->vma_node); + *offset = drm_vma_node_offset_addr(&obj->vma_node); drm_gem_object_put_unlocked(obj); - return err; -} - -static vm_fault_t lima_gem_fault(struct 
vm_fault *vmf) -{ - struct vm_area_struct *vma = vmf->vma; - struct drm_gem_object *obj = vma->vm_private_data; - struct lima_bo *bo = to_lima_bo(obj); - pfn_t pfn; - pgoff_t pgoff; - - /* We don't use vmf->pgoff since that has the fake offset: */ - pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT; - pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV); - - return vmf_insert_mixed(vma, vmf->address, pfn); -} - -const struct vm_operations_struct lima_gem_vm_ops = { - .fault = lima_gem_fault, - .open = drm_gem_vm_open, - .close = drm_gem_vm_close, -}; - -void lima_set_vma_flags(struct vm_area_struct *vma) -{ - pgprot_t prot = vm_get_page_prot(vma->vm_flags); - - vma->vm_flags |= VM_MIXEDMAP; - vma->vm_flags &= ~VM_PFNMAP; - vma->vm_page_prot = pgprot_writecombine(prot); -} - -int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma) -{ - int ret; - - ret = drm_gem_mmap(filp, vma); - if (ret) - return ret; - - lima_set_vma_flags(vma); return 0; } @@ -136,7 +135,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo, int err = 0; if (!write) { - err = dma_resv_reserve_shared(bo->gem.resv, 1); + err = dma_resv_reserve_shared(lima_bo_resv(bo), 1); if (err) return err; } @@ -145,62 +144,7 @@ static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo, if (explicit) return 0; - return drm_gem_fence_array_add_implicit(&task->deps, &bo->gem, write); -} - -static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos, - struct ww_acquire_ctx *ctx) -{ - int i, ret = 0, contended, slow_locked = -1; - - ww_acquire_init(ctx, &reservation_ww_class); - -retry: - for (i = 0; i < nr_bos; i++) { - if (i == slow_locked) { - slow_locked = -1; - continue; - } - - ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx); - if (ret < 0) { - contended = i; - goto err; - } - } - - ww_acquire_done(ctx); - return 0; - -err: - for (i--; i >= 0; i--) - ww_mutex_unlock(&bos[i]->gem.resv->lock); - - if (slow_locked >= 0) - ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock); - - if (ret == -EDEADLK) { - /* we lost out in a seqno race, lock and retry.. 
*/ - ret = ww_mutex_lock_slow_interruptible( - &bos[contended]->gem.resv->lock, ctx); - if (!ret) { - slow_locked = contended; - goto retry; - } - } - ww_acquire_fini(ctx); - - return ret; -} - -static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos, - struct ww_acquire_ctx *ctx) -{ - int i; - - for (i = 0; i < nr_bos; i++) - ww_mutex_unlock(&bos[i]->gem.resv->lock); - ww_acquire_fini(ctx); + return drm_gem_fence_array_add_implicit(&task->deps, &bo->base.base, write); } static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit) @@ -268,7 +212,8 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) bos[i] = bo; } - err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx); + err = drm_gem_lock_reservations((struct drm_gem_object **)bos, + submit->nr_bos, &ctx); if (err) goto err_out0; @@ -296,15 +241,16 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) for (i = 0; i < submit->nr_bos; i++) { if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE) - dma_resv_add_excl_fence(bos[i]->gem.resv, fence); + dma_resv_add_excl_fence(lima_bo_resv(bos[i]), fence); else - dma_resv_add_shared_fence(bos[i]->gem.resv, fence); + dma_resv_add_shared_fence(lima_bo_resv(bos[i]), fence); } - lima_gem_unlock_bos(bos, submit->nr_bos, &ctx); + drm_gem_unlock_reservations((struct drm_gem_object **)bos, + submit->nr_bos, &ctx); for (i = 0; i < submit->nr_bos; i++) - drm_gem_object_put_unlocked(&bos[i]->gem); + drm_gem_object_put_unlocked(&bos[i]->base.base); if (out_sync) { drm_syncobj_replace_fence(out_sync, fence); @@ -318,13 +264,14 @@ int lima_gem_submit(struct drm_file *file, struct lima_submit *submit) err_out2: lima_sched_task_fini(submit->task); err_out1: - lima_gem_unlock_bos(bos, submit->nr_bos, &ctx); + drm_gem_unlock_reservations((struct drm_gem_object **)bos, + submit->nr_bos, &ctx); err_out0: for (i = 0; i < submit->nr_bos; i++) { if (!bos[i]) break; lima_vm_bo_del(vm, bos[i]); - drm_gem_object_put_unlocked(&bos[i]->gem); + drm_gem_object_put_unlocked(&bos[i]->base.base); } if (out_sync) drm_syncobj_put(out_sync); diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h index 556111a01135..1800feb3e47f 100644 --- a/drivers/gpu/drm/lima/lima_gem.h +++ b/drivers/gpu/drm/lima/lima_gem.h @@ -4,19 +4,37 @@ #ifndef __LIMA_GEM_H__ #define __LIMA_GEM_H__ -struct lima_bo; +#include <drm/drm_gem_shmem_helper.h> + struct lima_submit; -extern const struct vm_operations_struct lima_gem_vm_ops; +struct lima_bo { + struct drm_gem_shmem_object base; + + struct mutex lock; + struct list_head va; +}; + +static inline struct lima_bo * +to_lima_bo(struct drm_gem_object *obj) +{ + return container_of(to_drm_gem_shmem_obj(obj), struct lima_bo, base); +} + +static inline size_t lima_bo_size(struct lima_bo *bo) +{ + return bo->base.base.size; +} + +static inline struct dma_resv *lima_bo_resv(struct lima_bo *bo) +{ + return bo->base.base.resv; +} -struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags); +struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size); int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file, u32 size, u32 flags, u32 *handle); -void lima_gem_free_object(struct drm_gem_object *obj); -int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file); -void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file); int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset); -int lima_gem_mmap(struct file *filp, 
struct vm_area_struct *vma); int lima_gem_submit(struct drm_file *file, struct lima_submit *submit); int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns); diff --git a/drivers/gpu/drm/lima/lima_gem_prime.c b/drivers/gpu/drm/lima/lima_gem_prime.c deleted file mode 100644 index e3eb251e0a12..000000000000 --- a/drivers/gpu/drm/lima/lima_gem_prime.c +++ /dev/null @@ -1,46 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ - -#include <linux/dma-buf.h> -#include <drm/drm_prime.h> -#include <drm/drm_drv.h> -#include <drm/drm_file.h> - -#include "lima_device.h" -#include "lima_object.h" -#include "lima_gem.h" -#include "lima_gem_prime.h" - -struct drm_gem_object *lima_gem_prime_import_sg_table( - struct drm_device *dev, struct dma_buf_attachment *attach, - struct sg_table *sgt) -{ - struct lima_device *ldev = to_lima_dev(dev); - struct lima_bo *bo; - - bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt); - if (IS_ERR(bo)) - return ERR_CAST(bo); - - return &bo->gem; -} - -struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj) -{ - struct lima_bo *bo = to_lima_bo(obj); - int npages = obj->size >> PAGE_SHIFT; - - return drm_prime_pages_to_sg(bo->pages, npages); -} - -int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) -{ - int ret; - - ret = drm_gem_mmap_obj(obj, obj->size, vma); - if (ret) - return ret; - - lima_set_vma_flags(vma); - return 0; -} diff --git a/drivers/gpu/drm/lima/lima_gem_prime.h b/drivers/gpu/drm/lima/lima_gem_prime.h deleted file mode 100644 index 34b4d35c21e3..000000000000 --- a/drivers/gpu/drm/lima/lima_gem_prime.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ - -#ifndef __LIMA_GEM_PRIME_H__ -#define __LIMA_GEM_PRIME_H__ - -struct drm_gem_object *lima_gem_prime_import_sg_table( - struct drm_device *dev, struct dma_buf_attachment *attach, - struct sg_table *sgt); -struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj); -int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); - -#endif diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c index 8e1651d6a61f..97ec09dee572 100644 --- a/drivers/gpu/drm/lima/lima_mmu.c +++ b/drivers/gpu/drm/lima/lima_mmu.c @@ -8,7 +8,6 @@ #include "lima_device.h" #include "lima_mmu.h" #include "lima_vm.h" -#include "lima_object.h" #include "lima_regs.h" #define mmu_write(reg, data) writel(data, ip->iomem + reg) diff --git a/drivers/gpu/drm/lima/lima_object.c b/drivers/gpu/drm/lima/lima_object.c deleted file mode 100644 index 87123b1d083c..000000000000 --- a/drivers/gpu/drm/lima/lima_object.c +++ /dev/null @@ -1,119 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ - -#include <drm/drm_prime.h> -#include <linux/pagemap.h> -#include <linux/dma-mapping.h> - -#include "lima_object.h" - -void lima_bo_destroy(struct lima_bo *bo) -{ - if (bo->sgt) { - kfree(bo->pages); - drm_prime_gem_destroy(&bo->gem, bo->sgt); - } else { - if (bo->pages_dma_addr) { - int i, npages = bo->gem.size >> PAGE_SHIFT; - - for (i = 0; i < npages; i++) { - if (bo->pages_dma_addr[i]) - dma_unmap_page(bo->gem.dev->dev, - bo->pages_dma_addr[i], - PAGE_SIZE, DMA_BIDIRECTIONAL); - } - } - - if (bo->pages) - drm_gem_put_pages(&bo->gem, bo->pages, true, true); - } - - kfree(bo->pages_dma_addr); - drm_gem_object_release(&bo->gem); - kfree(bo); -} - 
-static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags) -{ - struct lima_bo *bo; - int err; - - size = PAGE_ALIGN(size); - - bo = kzalloc(sizeof(*bo), GFP_KERNEL); - if (!bo) - return ERR_PTR(-ENOMEM); - - mutex_init(&bo->lock); - INIT_LIST_HEAD(&bo->va); - - err = drm_gem_object_init(dev->ddev, &bo->gem, size); - if (err) { - kfree(bo); - return ERR_PTR(err); - } - - return bo; -} - -struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size, - u32 flags, struct sg_table *sgt) -{ - int i, err; - size_t npages; - struct lima_bo *bo, *ret; - - bo = lima_bo_create_struct(dev, size, flags); - if (IS_ERR(bo)) - return bo; - - npages = bo->gem.size >> PAGE_SHIFT; - - bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL); - if (!bo->pages_dma_addr) { - ret = ERR_PTR(-ENOMEM); - goto err_out; - } - - if (sgt) { - bo->sgt = sgt; - - bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL); - if (!bo->pages) { - ret = ERR_PTR(-ENOMEM); - goto err_out; - } - - err = drm_prime_sg_to_page_addr_arrays( - sgt, bo->pages, bo->pages_dma_addr, npages); - if (err) { - ret = ERR_PTR(err); - goto err_out; - } - } else { - mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32); - bo->pages = drm_gem_get_pages(&bo->gem); - if (IS_ERR(bo->pages)) { - ret = ERR_CAST(bo->pages); - bo->pages = NULL; - goto err_out; - } - - for (i = 0; i < npages; i++) { - dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0, - PAGE_SIZE, DMA_BIDIRECTIONAL); - if (dma_mapping_error(dev->dev, addr)) { - ret = ERR_PTR(-EFAULT); - goto err_out; - } - bo->pages_dma_addr[i] = addr; - } - - } - - return bo; - -err_out: - lima_bo_destroy(bo); - return ret; -} diff --git a/drivers/gpu/drm/lima/lima_object.h b/drivers/gpu/drm/lima/lima_object.h deleted file mode 100644 index 31ca2d8dc0a1..000000000000 --- a/drivers/gpu/drm/lima/lima_object.h +++ /dev/null @@ -1,35 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */ - -#ifndef __LIMA_OBJECT_H__ -#define __LIMA_OBJECT_H__ - -#include <drm/drm_gem.h> - -#include "lima_device.h" - -struct lima_bo { - struct drm_gem_object gem; - - struct page **pages; - dma_addr_t *pages_dma_addr; - struct sg_table *sgt; - void *vaddr; - - struct mutex lock; - struct list_head va; -}; - -static inline struct lima_bo * -to_lima_bo(struct drm_gem_object *obj) -{ - return container_of(obj, struct lima_bo, gem); -} - -struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size, - u32 flags, struct sg_table *sgt); -void lima_bo_destroy(struct lima_bo *bo); -void *lima_bo_vmap(struct lima_bo *bo); -void lima_bo_vunmap(struct lima_bo *bo); - -#endif diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 4127cacac454..f522c5f99729 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -10,7 +10,7 @@ #include "lima_vm.h" #include "lima_mmu.h" #include "lima_l2_cache.h" -#include "lima_object.h" +#include "lima_gem.h" struct lima_fence { struct dma_fence base; @@ -117,7 +117,7 @@ int lima_sched_task_init(struct lima_sched_task *task, return -ENOMEM; for (i = 0; i < num_bos; i++) - drm_gem_object_get(&bos[i]->gem); + drm_gem_object_get(&bos[i]->base.base); err = drm_sched_job_init(&task->base, &context->base, vm); if (err) { @@ -148,7 +148,7 @@ void lima_sched_task_fini(struct lima_sched_task *task) if (task->bos) { for (i = 0; i < task->num_bos; i++) - drm_gem_object_put_unlocked(&task->bos[i]->gem); + 
drm_gem_object_put_unlocked(&task->bos[i]->base.base); kfree(task->bos); } diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c index 19e88ca16527..840e2350d872 100644 --- a/drivers/gpu/drm/lima/lima_vm.c +++ b/drivers/gpu/drm/lima/lima_vm.c @@ -6,7 +6,7 @@ #include "lima_device.h" #include "lima_vm.h" -#include "lima_object.h" +#include "lima_gem.h" #include "lima_regs.h" struct lima_bo_va { @@ -32,7 +32,7 @@ struct lima_bo_va { #define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT) -static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end) +static void lima_vm_unmap_range(struct lima_vm *vm, u32 start, u32 end) { u32 addr; @@ -44,41 +44,32 @@ static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end) } } -static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma, - u32 start, u32 end) +static int lima_vm_map_page(struct lima_vm *vm, dma_addr_t pa, u32 va) { - u64 addr; - int i = 0; - - for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) { - u32 pbe = LIMA_PBE(addr); - u32 bte = LIMA_BTE(addr); - - if (!vm->bts[pbe].cpu) { - dma_addr_t pts; - u32 *pd; - int j; - - vm->bts[pbe].cpu = dma_alloc_wc( - vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT, - &vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO); - if (!vm->bts[pbe].cpu) { - if (addr != start) - lima_vm_unmap_page_table(vm, start, addr - 1); - return -ENOMEM; - } - - pts = vm->bts[pbe].dma; - pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT); - for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) { - pd[j] = pts | LIMA_VM_FLAG_PRESENT; - pts += LIMA_PAGE_SIZE; - } + u32 pbe = LIMA_PBE(va); + u32 bte = LIMA_BTE(va); + + if (!vm->bts[pbe].cpu) { + dma_addr_t pts; + u32 *pd; + int j; + + vm->bts[pbe].cpu = dma_alloc_wc( + vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT, + &vm->bts[pbe].dma, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); + if (!vm->bts[pbe].cpu) + return -ENOMEM; + + pts = vm->bts[pbe].dma; + pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT); + for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) { + pd[j] = pts | LIMA_VM_FLAG_PRESENT; + pts += LIMA_PAGE_SIZE; } - - vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE; } + vm->bts[pbe].cpu[bte] = pa | LIMA_VM_FLAGS_CACHE; + return 0; } @@ -100,7 +91,8 @@ lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo) int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create) { struct lima_bo_va *bo_va; - int err; + struct sg_dma_page_iter sg_iter; + int offset = 0, err; mutex_lock(&bo->lock); @@ -128,14 +120,18 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create) mutex_lock(&vm->lock); - err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size); + err = drm_mm_insert_node(&vm->mm, &bo_va->node, lima_bo_size(bo)); if (err) goto err_out1; - err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start, - bo_va->node.start + bo_va->node.size - 1); - if (err) - goto err_out2; + for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter, bo->base.sgt->nents, 0) { + err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter), + bo_va->node.start + offset); + if (err) + goto err_out2; + + offset += PAGE_SIZE; + } mutex_unlock(&vm->lock); @@ -145,6 +141,8 @@ int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create) return 0; err_out2: + if (offset) + lima_vm_unmap_range(vm, bo_va->node.start, bo_va->node.start + offset - 1); drm_mm_remove_node(&bo_va->node); err_out1: mutex_unlock(&vm->lock); @@ -168,8 +166,8 @@ void lima_vm_bo_del(struct 
lima_vm *vm, struct lima_bo *bo) mutex_lock(&vm->lock); - lima_vm_unmap_page_table(vm, bo_va->node.start, - bo_va->node.start + bo_va->node.size - 1); + lima_vm_unmap_range(vm, bo_va->node.start, + bo_va->node.start + bo_va->node.size - 1); drm_mm_remove_node(&bo_va->node); @@ -210,14 +208,13 @@ struct lima_vm *lima_vm_create(struct lima_device *dev) kref_init(&vm->refcount); vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma, - GFP_KERNEL | __GFP_ZERO); + GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO); if (!vm->pd.cpu) goto err_out0; if (dev->dlbu_cpu) { - int err = lima_vm_map_page_table( - vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU, - LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1); + int err = lima_vm_map_page( + vm, dev->dlbu_dma, LIMA_VA_RESERVE_DLBU); if (err) goto err_out1; } diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 9a09eba53182..5649887d2b90 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -484,7 +484,8 @@ static int mcde_probe(struct platform_device *pdev) } if (!match) { dev_err(dev, "no matching components\n"); - return -ENODEV; + ret = -ENODEV; + goto clk_disable; } if (IS_ERR(match)) { dev_err(dev, "could not create component match\n"); diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c index f9c9e32b299c..d6214d3c8b33 100644 --- a/drivers/gpu/drm/mcde/mcde_dsi.c +++ b/drivers/gpu/drm/mcde/mcde_dsi.c @@ -946,8 +946,8 @@ static int mcde_dsi_bind(struct device *dev, struct device *master, } } if (panel) { - bridge = drm_panel_bridge_add(panel, - DRM_MODE_CONNECTOR_DSI); + bridge = drm_panel_bridge_add_typed(panel, + DRM_MODE_CONNECTOR_DSI); if (IS_ERR(bridge)) { dev_err(dev, "error adding panel bridge\n"); return PTR_ERR(bridge); diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index 82ae49c64221..8067a4be8311 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -12,6 +12,8 @@ mediatek-drm-y := mtk_disp_color.o \ mtk_drm_plane.o \ mtk_dsi.o \ mtk_mipi_tx.o \ + mtk_mt8173_mipi_tx.o \ + mtk_mt8183_mipi_tx.o \ mtk_dpi.o obj-$(CONFIG_DRM_MEDIATEK) += mediatek-drm.o diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 21851756c579..4a55bb6e2213 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -3,6 +3,8 @@ * Copyright (c) 2015 MediaTek Inc. 
*/ +#include <drm/drm_fourcc.h> + #include <linux/clk.h> #include <linux/component.h> #include <linux/module.h> @@ -19,6 +21,8 @@ #define DISP_REG_OVL_EN 0x000c #define DISP_REG_OVL_RST 0x0014 #define DISP_REG_OVL_ROI_SIZE 0x0020 +#define DISP_REG_OVL_DATAPATH_CON 0x0024 +#define OVL_BGCLR_SEL_IN BIT(2) #define DISP_REG_OVL_ROI_BGCLR 0x0028 #define DISP_REG_OVL_SRC_CON 0x002c #define DISP_REG_OVL_CON(n) (0x0030 + 0x20 * (n)) @@ -31,7 +35,9 @@ #define DISP_REG_OVL_ADDR_MT8173 0x0f40 #define DISP_REG_OVL_ADDR(ovl, n) ((ovl)->data->addr + 0x20 * (n)) -#define OVL_RDMA_MEM_GMC 0x40402020 +#define GMC_THRESHOLD_BITS 16 +#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4) +#define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8) #define OVL_CON_BYTE_SWAP BIT(24) #define OVL_CON_MTX_YUV_TO_RGB (6 << 16) @@ -46,9 +52,13 @@ OVL_CON_CLRFMT_RGB : 0) #define OVL_CON_AEN BIT(8) #define OVL_CON_ALPHA 0xff +#define OVL_CON_VIRT_FLIP BIT(9) +#define OVL_CON_HORZ_FLIP BIT(10) struct mtk_disp_ovl_data { unsigned int addr; + unsigned int gmc_bits; + unsigned int layer_nr; bool fmt_rgb565_is_0; }; @@ -126,15 +136,65 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w, static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp) { - return 4; + struct mtk_disp_ovl *ovl = comp_to_ovl(comp); + + return ovl->data->layer_nr; +} + +static unsigned int mtk_ovl_supported_rotations(struct mtk_ddp_comp *comp) +{ + return DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 | + DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y; +} + +static int mtk_ovl_layer_check(struct mtk_ddp_comp *comp, unsigned int idx, + struct mtk_plane_state *mtk_state) +{ + struct drm_plane_state *state = &mtk_state->base; + unsigned int rotation = 0; + + rotation = drm_rotation_simplify(state->rotation, + DRM_MODE_ROTATE_0 | + DRM_MODE_REFLECT_X | + DRM_MODE_REFLECT_Y); + rotation &= ~DRM_MODE_ROTATE_0; + + /* We can only do reflection, not rotation */ + if ((rotation & DRM_MODE_ROTATE_MASK) != 0) + return -EINVAL; + + /* + * TODO: Rotating/reflecting YUV buffers is not supported at this time. + * Only RGB[AX] variants are supported. 
+ */ + if (state->fb->format->is_yuv && rotation != 0) + return -EINVAL; + + state->rotation = rotation; + + return 0; } static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx) { unsigned int reg; + unsigned int gmc_thrshd_l; + unsigned int gmc_thrshd_h; + unsigned int gmc_value; + struct mtk_disp_ovl *ovl = comp_to_ovl(comp); writel(0x1, comp->regs + DISP_REG_OVL_RDMA_CTRL(idx)); - writel(OVL_RDMA_MEM_GMC, comp->regs + DISP_REG_OVL_RDMA_GMC(idx)); + + gmc_thrshd_l = GMC_THRESHOLD_LOW >> + (GMC_THRESHOLD_BITS - ovl->data->gmc_bits); + gmc_thrshd_h = GMC_THRESHOLD_HIGH >> + (GMC_THRESHOLD_BITS - ovl->data->gmc_bits); + if (ovl->data->gmc_bits == 10) + gmc_value = gmc_thrshd_h | gmc_thrshd_h << 16; + else + gmc_value = gmc_thrshd_l | gmc_thrshd_l << 8 | + gmc_thrshd_h << 16 | gmc_thrshd_h << 24; + writel(gmc_value, comp->regs + DISP_REG_OVL_RDMA_GMC(idx)); reg = readl(comp->regs + DISP_REG_OVL_SRC_CON); reg = reg | BIT(idx); @@ -207,6 +267,16 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, if (idx != 0) con |= OVL_CON_AEN | OVL_CON_ALPHA; + if (pending->rotation & DRM_MODE_REFLECT_Y) { + con |= OVL_CON_VIRT_FLIP; + addr += (pending->height - 1) * pending->pitch; + } + + if (pending->rotation & DRM_MODE_REFLECT_X) { + con |= OVL_CON_HORZ_FLIP; + addr += pending->pitch - 1; + } + writel_relaxed(con, comp->regs + DISP_REG_OVL_CON(idx)); writel_relaxed(pitch, comp->regs + DISP_REG_OVL_PITCH(idx)); writel_relaxed(src_size, comp->regs + DISP_REG_OVL_SRC_SIZE(idx)); @@ -217,16 +287,38 @@ static void mtk_ovl_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, mtk_ovl_layer_on(comp, idx); } +static void mtk_ovl_bgclr_in_on(struct mtk_ddp_comp *comp) +{ + unsigned int reg; + + reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON); + reg = reg | OVL_BGCLR_SEL_IN; + writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON); +} + +static void mtk_ovl_bgclr_in_off(struct mtk_ddp_comp *comp) +{ + unsigned int reg; + + reg = readl(comp->regs + DISP_REG_OVL_DATAPATH_CON); + reg = reg & ~OVL_BGCLR_SEL_IN; + writel(reg, comp->regs + DISP_REG_OVL_DATAPATH_CON); +} + static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = { .config = mtk_ovl_config, .start = mtk_ovl_start, .stop = mtk_ovl_stop, .enable_vblank = mtk_ovl_enable_vblank, .disable_vblank = mtk_ovl_disable_vblank, + .supported_rotations = mtk_ovl_supported_rotations, .layer_nr = mtk_ovl_layer_nr, .layer_on = mtk_ovl_layer_on, .layer_off = mtk_ovl_layer_off, + .layer_check = mtk_ovl_layer_check, .layer_config = mtk_ovl_layer_config, + .bgclr_in_on = mtk_ovl_bgclr_in_on, + .bgclr_in_off = mtk_ovl_bgclr_in_off, }; static int mtk_disp_ovl_bind(struct device *dev, struct device *master, @@ -276,7 +368,12 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) if (irq < 0) return irq; - comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_OVL); + priv->data = of_device_get_match_data(dev); + + comp_id = mtk_ddp_comp_get_id(dev->of_node, + priv->data->layer_nr == 4 ? 
+ MTK_DISP_OVL : + MTK_DISP_OVL_2L); if (comp_id < 0) { dev_err(dev, "Failed to identify by alias: %d\n", comp_id); return comp_id; @@ -289,8 +386,6 @@ static int mtk_disp_ovl_probe(struct platform_device *pdev) return ret; } - priv->data = of_device_get_match_data(dev); - platform_set_drvdata(pdev, priv); ret = devm_request_irq(dev, irq, mtk_disp_ovl_irq_handler, @@ -316,11 +411,15 @@ static int mtk_disp_ovl_remove(struct platform_device *pdev) static const struct mtk_disp_ovl_data mt2701_ovl_driver_data = { .addr = DISP_REG_OVL_ADDR_MT2701, + .gmc_bits = 8, + .layer_nr = 4, .fmt_rgb565_is_0 = false, }; static const struct mtk_disp_ovl_data mt8173_ovl_driver_data = { .addr = DISP_REG_OVL_ADDR_MT8173, + .gmc_bits = 8, + .layer_nr = 4, .fmt_rgb565_is_0 = true, }; diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c index be6d95c5ff25..01fa8b8d763d 100644 --- a/drivers/gpu/drm/mediatek/mtk_dpi.c +++ b/drivers/gpu/drm/mediatek/mtk_dpi.c @@ -17,6 +17,7 @@ #include <video/videomode.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_of.h> diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 34a731755791..f80a8ba75977 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -207,6 +207,28 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc) clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk); } +static +struct mtk_ddp_comp *mtk_drm_ddp_comp_for_plane(struct drm_crtc *crtc, + struct drm_plane *plane, + unsigned int *local_layer) +{ + struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_ddp_comp *comp; + int i, count = 0; + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + comp = mtk_crtc->ddp_comp[i]; + if (plane->index < (count + mtk_ddp_comp_layer_nr(comp))) { + *local_layer = plane->index - count; + return comp; + } + count += mtk_ddp_comp_layer_nr(comp); + } + + WARN(1, "Failed to find component for plane %d\n", plane->index); + return NULL; +} + static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) { struct drm_crtc *crtc = &mtk_crtc->base; @@ -272,6 +294,9 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[i]; + if (i == 1) + mtk_ddp_comp_bgclr_in_on(comp); + mtk_ddp_comp_config(comp, width, height, vrefresh, bpc); mtk_ddp_comp_start(comp); } @@ -280,10 +305,12 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc) for (i = 0; i < mtk_crtc->layer_nr; i++) { struct drm_plane *plane = &mtk_crtc->planes[i]; struct mtk_plane_state *plane_state; + struct mtk_ddp_comp *comp; + unsigned int local_layer; plane_state = to_mtk_plane_state(plane->state); - mtk_ddp_comp_layer_config(mtk_crtc->ddp_comp[0], i, - plane_state); + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); + mtk_ddp_comp_layer_config(comp, local_layer, plane_state); } return 0; @@ -301,8 +328,12 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_drm_crtc *mtk_crtc) int i; DRM_DEBUG_DRIVER("%s\n", __func__); - for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { mtk_ddp_comp_stop(mtk_crtc->ddp_comp[i]); + if (i == 1) + mtk_ddp_comp_bgclr_in_off(mtk_crtc->ddp_comp[i]); + } + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) mtk_disp_mutex_remove_comp(mtk_crtc->mutex, mtk_crtc->ddp_comp[i]->id); @@ -327,6 +358,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc 
*crtc) struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state); struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0]; unsigned int i; + unsigned int local_layer; /* * TODO: instead of updating the registers here, we should prepare @@ -348,15 +380,30 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc) plane_state = to_mtk_plane_state(plane->state); - if (plane_state->pending.config) { - mtk_ddp_comp_layer_config(comp, i, plane_state); - plane_state->pending.config = false; - } + if (!plane_state->pending.config) + continue; + + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, + &local_layer); + + mtk_ddp_comp_layer_config(comp, local_layer, + plane_state); + plane_state->pending.config = false; } mtk_crtc->pending_planes = false; } } +int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, + struct mtk_plane_state *state) +{ + unsigned int local_layer; + struct mtk_ddp_comp *comp; + + comp = mtk_drm_ddp_comp_for_plane(crtc, plane, &local_layer); + return mtk_ddp_comp_layer_check(comp, local_layer, state); +} + static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -518,14 +565,65 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp) mtk_drm_finish_page_flip(mtk_crtc); } +static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc, + int comp_idx) +{ + struct mtk_ddp_comp *comp; + + if (comp_idx > 1) + return 0; + + comp = mtk_crtc->ddp_comp[comp_idx]; + if (!comp->funcs) + return 0; + + if (comp_idx == 1 && !comp->funcs->bgclr_in_on) + return 0; + + return mtk_ddp_comp_layer_nr(comp); +} + +static inline +enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx) +{ + if (plane_idx == 0) + return DRM_PLANE_TYPE_PRIMARY; + else if (plane_idx == 1) + return DRM_PLANE_TYPE_CURSOR; + else + return DRM_PLANE_TYPE_OVERLAY; + +} + +static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev, + struct mtk_drm_crtc *mtk_crtc, + int comp_idx, int pipe) +{ + int num_planes = mtk_drm_crtc_num_comp_planes(mtk_crtc, comp_idx); + struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[comp_idx]; + int i, ret; + + for (i = 0; i < num_planes; i++) { + ret = mtk_plane_init(drm_dev, + &mtk_crtc->planes[mtk_crtc->layer_nr], + BIT(pipe), + mtk_drm_crtc_plane_type(mtk_crtc->layer_nr), + mtk_ddp_comp_supported_rotations(comp)); + if (ret) + return ret; + + mtk_crtc->layer_nr++; + } + return 0; +} + int mtk_drm_crtc_create(struct drm_device *drm_dev, const enum mtk_ddp_comp_id *path, unsigned int path_len) { struct mtk_drm_private *priv = drm_dev->dev_private; struct device *dev = drm_dev->dev; struct mtk_drm_crtc *mtk_crtc; - enum drm_plane_type type; - unsigned int zpos; + unsigned int num_comp_planes = 0; int pipe = priv->num_pipes; int ret; int i; @@ -581,17 +679,15 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mtk_crtc->ddp_comp[i] = comp; } - mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]); - mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr, - sizeof(struct drm_plane), - GFP_KERNEL); - - for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) { - type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY : - (zpos == 1) ? 
DRM_PLANE_TYPE_CURSOR : - DRM_PLANE_TYPE_OVERLAY; - ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos], - BIT(pipe), type); + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) + num_comp_planes += mtk_drm_crtc_num_comp_planes(mtk_crtc, i); + + mtk_crtc->planes = devm_kcalloc(dev, num_comp_planes, + sizeof(struct drm_plane), GFP_KERNEL); + + for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) { + ret = mtk_drm_crtc_init_comp_planes(drm_dev, mtk_crtc, i, + pipe); if (ret) return ret; } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h index fcc134eb00c9..6afe1c19557a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h @@ -19,5 +19,7 @@ void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp); int mtk_drm_crtc_create(struct drm_device *drm_dev, const enum mtk_ddp_comp_id *path, unsigned int path_len); +int mtk_drm_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, + struct mtk_plane_state *state); #endif /* MTK_DRM_CRTC_H */ diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c index 8106a71a7404..13035c906035 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c @@ -33,12 +33,15 @@ #define DISP_REG_CONFIG_DSI_SEL 0x050 #define DISP_REG_CONFIG_DPI_SEL 0x064 -#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n)) -#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n)) -#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n)) -#define DISP_REG_MUTEX_MOD(n) (0x2c + 0x20 * (n)) -#define DISP_REG_MUTEX_SOF(n) (0x30 + 0x20 * (n)) -#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n)) +#define MT2701_DISP_MUTEX0_MOD0 0x2c +#define MT2701_DISP_MUTEX0_SOF0 0x30 + +#define DISP_REG_MUTEX_EN(n) (0x20 + 0x20 * (n)) +#define DISP_REG_MUTEX(n) (0x24 + 0x20 * (n)) +#define DISP_REG_MUTEX_RST(n) (0x28 + 0x20 * (n)) +#define DISP_REG_MUTEX_MOD(mutex_mod_reg, n) (mutex_mod_reg + 0x20 * (n)) +#define DISP_REG_MUTEX_SOF(mutex_sof_reg, n) (mutex_sof_reg + 0x20 * (n)) +#define DISP_REG_MUTEX_MOD2(n) (0x34 + 0x20 * (n)) #define INT_MUTEX BIT(1) @@ -139,12 +142,30 @@ struct mtk_disp_mutex { bool claimed; }; +enum mtk_ddp_mutex_sof_id { + DDP_MUTEX_SOF_SINGLE_MODE, + DDP_MUTEX_SOF_DSI0, + DDP_MUTEX_SOF_DSI1, + DDP_MUTEX_SOF_DPI0, + DDP_MUTEX_SOF_DPI1, + DDP_MUTEX_SOF_DSI2, + DDP_MUTEX_SOF_DSI3, +}; + +struct mtk_ddp_data { + const unsigned int *mutex_mod; + const unsigned int *mutex_sof; + const unsigned int mutex_mod_reg; + const unsigned int mutex_sof_reg; + const bool no_clk; +}; + struct mtk_ddp { struct device *dev; struct clk *clk; void __iomem *regs; struct mtk_disp_mutex mutex[10]; - const unsigned int *mutex_mod; + const struct mtk_ddp_data *data; }; static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = { @@ -194,6 +215,37 @@ static const unsigned int mt8173_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA1] = MT8173_MUTEX_MOD_DISP_WDMA1, }; +static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_DSI3 + 1] = { + [DDP_MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, + [DDP_MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0, + [DDP_MUTEX_SOF_DSI1] = MUTEX_SOF_DSI1, + [DDP_MUTEX_SOF_DPI0] = MUTEX_SOF_DPI0, + [DDP_MUTEX_SOF_DPI1] = MUTEX_SOF_DPI1, + [DDP_MUTEX_SOF_DSI2] = MUTEX_SOF_DSI2, + [DDP_MUTEX_SOF_DSI3] = MUTEX_SOF_DSI3, +}; + +static const struct mtk_ddp_data mt2701_ddp_driver_data = { + .mutex_mod = mt2701_mutex_mod, + .mutex_sof = mt2712_mutex_sof, + .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0, + .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0, 
+}; + +static const struct mtk_ddp_data mt2712_ddp_driver_data = { + .mutex_mod = mt2712_mutex_mod, + .mutex_sof = mt2712_mutex_sof, + .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0, + .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0, +}; + +static const struct mtk_ddp_data mt8173_ddp_driver_data = { + .mutex_mod = mt8173_mutex_mod, + .mutex_sof = mt2712_mutex_sof, + .mutex_mod_reg = MT2701_DISP_MUTEX0_MOD0, + .mutex_sof_reg = MT2701_DISP_MUTEX0_SOF0, +}; + static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur, enum mtk_ddp_comp_id next, unsigned int *addr) @@ -432,45 +484,49 @@ void mtk_disp_mutex_add_comp(struct mtk_disp_mutex *mutex, struct mtk_ddp *ddp = container_of(mutex, struct mtk_ddp, mutex[mutex->id]); unsigned int reg; + unsigned int sof_id; unsigned int offset; WARN_ON(&ddp->mutex[mutex->id] != mutex); switch (id) { case DDP_COMPONENT_DSI0: - reg = MUTEX_SOF_DSI0; + sof_id = DDP_MUTEX_SOF_DSI0; break; case DDP_COMPONENT_DSI1: - reg = MUTEX_SOF_DSI0; + sof_id = DDP_MUTEX_SOF_DSI0; break; case DDP_COMPONENT_DSI2: - reg = MUTEX_SOF_DSI2; + sof_id = DDP_MUTEX_SOF_DSI2; break; case DDP_COMPONENT_DSI3: - reg = MUTEX_SOF_DSI3; + sof_id = DDP_MUTEX_SOF_DSI3; break; case DDP_COMPONENT_DPI0: - reg = MUTEX_SOF_DPI0; + sof_id = DDP_MUTEX_SOF_DPI0; break; case DDP_COMPONENT_DPI1: - reg = MUTEX_SOF_DPI1; + sof_id = DDP_MUTEX_SOF_DPI1; break; default: - if (ddp->mutex_mod[id] < 32) { - offset = DISP_REG_MUTEX_MOD(mutex->id); + if (ddp->data->mutex_mod[id] < 32) { + offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg, + mutex->id); reg = readl_relaxed(ddp->regs + offset); - reg |= 1 << ddp->mutex_mod[id]; + reg |= 1 << ddp->data->mutex_mod[id]; writel_relaxed(reg, ddp->regs + offset); } else { offset = DISP_REG_MUTEX_MOD2(mutex->id); reg = readl_relaxed(ddp->regs + offset); - reg |= 1 << (ddp->mutex_mod[id] - 32); + reg |= 1 << (ddp->data->mutex_mod[id] - 32); writel_relaxed(reg, ddp->regs + offset); } return; } - writel_relaxed(reg, ddp->regs + DISP_REG_MUTEX_SOF(mutex->id)); + writel_relaxed(ddp->data->mutex_sof[sof_id], + ddp->regs + + DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, mutex->id)); } void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex, @@ -491,18 +547,21 @@ void mtk_disp_mutex_remove_comp(struct mtk_disp_mutex *mutex, case DDP_COMPONENT_DPI0: case DDP_COMPONENT_DPI1: writel_relaxed(MUTEX_SOF_SINGLE_MODE, - ddp->regs + DISP_REG_MUTEX_SOF(mutex->id)); + ddp->regs + + DISP_REG_MUTEX_SOF(ddp->data->mutex_sof_reg, + mutex->id)); break; default: - if (ddp->mutex_mod[id] < 32) { - offset = DISP_REG_MUTEX_MOD(mutex->id); + if (ddp->data->mutex_mod[id] < 32) { + offset = DISP_REG_MUTEX_MOD(ddp->data->mutex_mod_reg, + mutex->id); reg = readl_relaxed(ddp->regs + offset); - reg &= ~(1 << ddp->mutex_mod[id]); + reg &= ~(1 << ddp->data->mutex_mod[id]); writel_relaxed(reg, ddp->regs + offset); } else { offset = DISP_REG_MUTEX_MOD2(mutex->id); reg = readl_relaxed(ddp->regs + offset); - reg &= ~(1 << (ddp->mutex_mod[id] - 32)); + reg &= ~(1 << (ddp->data->mutex_mod[id] - 32)); writel_relaxed(reg, ddp->regs + offset); } break; @@ -564,10 +623,14 @@ static int mtk_ddp_probe(struct platform_device *pdev) for (i = 0; i < 10; i++) ddp->mutex[i].id = i; - ddp->clk = devm_clk_get(dev, NULL); - if (IS_ERR(ddp->clk)) { - dev_err(dev, "Failed to get clock\n"); - return PTR_ERR(ddp->clk); + ddp->data = of_device_get_match_data(dev); + + if (!ddp->data->no_clk) { + ddp->clk = devm_clk_get(dev, NULL); + if (IS_ERR(ddp->clk)) { + dev_err(dev, "Failed to get clock\n"); + return PTR_ERR(ddp->clk); + } } regs 
= platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -577,8 +640,6 @@ static int mtk_ddp_probe(struct platform_device *pdev) return PTR_ERR(ddp->regs); } - ddp->mutex_mod = of_device_get_match_data(dev); - platform_set_drvdata(pdev, ddp); return 0; @@ -590,9 +651,12 @@ static int mtk_ddp_remove(struct platform_device *pdev) } static const struct of_device_id ddp_driver_dt_match[] = { - { .compatible = "mediatek,mt2701-disp-mutex", .data = mt2701_mutex_mod}, - { .compatible = "mediatek,mt2712-disp-mutex", .data = mt2712_mutex_mod}, - { .compatible = "mediatek,mt8173-disp-mutex", .data = mt8173_mutex_mod}, + { .compatible = "mediatek,mt2701-disp-mutex", + .data = &mt2701_ddp_driver_data}, + { .compatible = "mediatek,mt2712-disp-mutex", + .data = &mt2712_ddp_driver_data}, + { .compatible = "mediatek,mt8173-disp-mutex", + .data = &mt8173_ddp_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, ddp_driver_dt_match); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index efa85973e46b..7f21307cda75 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -33,6 +33,18 @@ #define DISP_AAL_EN 0x0000 #define DISP_AAL_SIZE 0x0030 +#define DISP_CCORR_EN 0x0000 +#define CCORR_EN BIT(0) +#define DISP_CCORR_CFG 0x0020 +#define CCORR_RELAY_MODE BIT(0) +#define DISP_CCORR_SIZE 0x0030 + +#define DISP_DITHER_EN 0x0000 +#define DITHER_EN BIT(0) +#define DISP_DITHER_CFG 0x0020 +#define DITHER_RELAY_MODE BIT(0) +#define DISP_DITHER_SIZE 0x0030 + #define DISP_GAMMA_EN 0x0000 #define DISP_GAMMA_CFG 0x0020 #define DISP_GAMMA_SIZE 0x0030 @@ -123,6 +135,42 @@ static void mtk_aal_stop(struct mtk_ddp_comp *comp) writel_relaxed(0x0, comp->regs + DISP_AAL_EN); } +static void mtk_ccorr_config(struct mtk_ddp_comp *comp, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc) +{ + writel(h << 16 | w, comp->regs + DISP_CCORR_SIZE); + writel(CCORR_RELAY_MODE, comp->regs + DISP_CCORR_CFG); +} + +static void mtk_ccorr_start(struct mtk_ddp_comp *comp) +{ + writel(CCORR_EN, comp->regs + DISP_CCORR_EN); +} + +static void mtk_ccorr_stop(struct mtk_ddp_comp *comp) +{ + writel_relaxed(0x0, comp->regs + DISP_CCORR_EN); +} + +static void mtk_dither_config(struct mtk_ddp_comp *comp, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc) +{ + writel(h << 16 | w, comp->regs + DISP_DITHER_SIZE); + writel(DITHER_RELAY_MODE, comp->regs + DISP_DITHER_CFG); +} + +static void mtk_dither_start(struct mtk_ddp_comp *comp) +{ + writel(DITHER_EN, comp->regs + DISP_DITHER_EN); +} + +static void mtk_dither_stop(struct mtk_ddp_comp *comp) +{ + writel_relaxed(0x0, comp->regs + DISP_DITHER_EN); +} + static void mtk_gamma_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, unsigned int bpc) @@ -171,6 +219,18 @@ static const struct mtk_ddp_comp_funcs ddp_aal = { .stop = mtk_aal_stop, }; +static const struct mtk_ddp_comp_funcs ddp_ccorr = { + .config = mtk_ccorr_config, + .start = mtk_ccorr_start, + .stop = mtk_ccorr_stop, +}; + +static const struct mtk_ddp_comp_funcs ddp_dither = { + .config = mtk_dither_config, + .start = mtk_dither_start, + .stop = mtk_dither_stop, +}; + static const struct mtk_ddp_comp_funcs ddp_gamma = { .gamma_set = mtk_gamma_set, .config = mtk_gamma_config, @@ -189,11 +249,14 @@ static const struct mtk_ddp_comp_funcs ddp_ufoe = { static const char * const mtk_ddp_comp_stem[MTK_DDP_COMP_TYPE_MAX] = { [MTK_DISP_OVL] = "ovl", + [MTK_DISP_OVL_2L] = "ovl_2l", 
[MTK_DISP_RDMA] = "rdma", [MTK_DISP_WDMA] = "wdma", [MTK_DISP_COLOR] = "color", + [MTK_DISP_CCORR] = "ccorr", [MTK_DISP_AAL] = "aal", [MTK_DISP_GAMMA] = "gamma", + [MTK_DISP_DITHER] = "dither", [MTK_DISP_UFOE] = "ufoe", [MTK_DSI] = "dsi", [MTK_DPI] = "dpi", @@ -213,8 +276,10 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = { MTK_DISP_AAL, 0, &ddp_aal }, [DDP_COMPONENT_AAL1] = { MTK_DISP_AAL, 1, &ddp_aal }, [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, + [DDP_COMPONENT_CCORR] = { MTK_DISP_CCORR, 0, &ddp_ccorr }, [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL }, [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL }, + [DDP_COMPONENT_DITHER] = { MTK_DISP_DITHER, 0, &ddp_dither }, [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL }, [DDP_COMPONENT_DPI1] = { MTK_DPI, 1, NULL }, [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL }, @@ -226,6 +291,8 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_OD1] = { MTK_DISP_OD, 1, &ddp_od }, [DDP_COMPONENT_OVL0] = { MTK_DISP_OVL, 0, NULL }, [DDP_COMPONENT_OVL1] = { MTK_DISP_OVL, 1, NULL }, + [DDP_COMPONENT_OVL_2L0] = { MTK_DISP_OVL_2L, 0, NULL }, + [DDP_COMPONENT_OVL_2L1] = { MTK_DISP_OVL_2L, 1, NULL }, [DDP_COMPONENT_PWM0] = { MTK_DISP_PWM, 0, NULL }, [DDP_COMPONENT_PWM1] = { MTK_DISP_PWM, 1, NULL }, [DDP_COMPONENT_PWM2] = { MTK_DISP_PWM, 2, NULL }, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h index 0ad287f427cc..2f1e9e75b8da 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h @@ -17,9 +17,12 @@ struct drm_crtc_state; enum mtk_ddp_comp_type { MTK_DISP_OVL, + MTK_DISP_OVL_2L, MTK_DISP_RDMA, MTK_DISP_WDMA, MTK_DISP_COLOR, + MTK_DISP_CCORR, + MTK_DISP_DITHER, MTK_DISP_AAL, MTK_DISP_GAMMA, MTK_DISP_UFOE, @@ -36,8 +39,10 @@ enum mtk_ddp_comp_id { DDP_COMPONENT_AAL0, DDP_COMPONENT_AAL1, DDP_COMPONENT_BLS, + DDP_COMPONENT_CCORR, DDP_COMPONENT_COLOR0, DDP_COMPONENT_COLOR1, + DDP_COMPONENT_DITHER, DDP_COMPONENT_DPI0, DDP_COMPONENT_DPI1, DDP_COMPONENT_DSI0, @@ -48,6 +53,8 @@ enum mtk_ddp_comp_id { DDP_COMPONENT_OD0, DDP_COMPONENT_OD1, DDP_COMPONENT_OVL0, + DDP_COMPONENT_OVL_2L0, + DDP_COMPONENT_OVL_2L1, DDP_COMPONENT_OVL1, DDP_COMPONENT_PWM0, DDP_COMPONENT_PWM1, @@ -70,13 +77,19 @@ struct mtk_ddp_comp_funcs { void (*stop)(struct mtk_ddp_comp *comp); void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc); void (*disable_vblank)(struct mtk_ddp_comp *comp); + unsigned int (*supported_rotations)(struct mtk_ddp_comp *comp); unsigned int (*layer_nr)(struct mtk_ddp_comp *comp); void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx); void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx); + int (*layer_check)(struct mtk_ddp_comp *comp, + unsigned int idx, + struct mtk_plane_state *state); void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state); void (*gamma_set)(struct mtk_ddp_comp *comp, struct drm_crtc_state *state); + void (*bgclr_in_on)(struct mtk_ddp_comp *comp); + void (*bgclr_in_off)(struct mtk_ddp_comp *comp); }; struct mtk_ddp_comp { @@ -121,6 +134,15 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp) comp->funcs->disable_vblank(comp); } +static inline +unsigned int mtk_ddp_comp_supported_rotations(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->supported_rotations) + return comp->funcs->supported_rotations(comp); + + return 0; +} + static 
inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp) { if (comp->funcs && comp->funcs->layer_nr) @@ -143,6 +165,15 @@ static inline void mtk_ddp_comp_layer_off(struct mtk_ddp_comp *comp, comp->funcs->layer_off(comp, idx); } +static inline int mtk_ddp_comp_layer_check(struct mtk_ddp_comp *comp, + unsigned int idx, + struct mtk_plane_state *state) +{ + if (comp->funcs && comp->funcs->layer_check) + return comp->funcs->layer_check(comp, idx, state); + return 0; +} + static inline void mtk_ddp_comp_layer_config(struct mtk_ddp_comp *comp, unsigned int idx, struct mtk_plane_state *state) @@ -158,6 +189,18 @@ static inline void mtk_ddp_gamma_set(struct mtk_ddp_comp *comp, comp->funcs->gamma_set(comp, state); } +static inline void mtk_ddp_comp_bgclr_in_on(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->bgclr_in_on) + comp->funcs->bgclr_in_on(comp); +} + +static inline void mtk_ddp_comp_bgclr_in_off(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->bgclr_in_off) + comp->funcs->bgclr_in_off(comp); +} + int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type); int mtk_ddp_comp_init(struct device *dev, struct device_node *comp_node, diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index 352b81a7a670..84d14213d992 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -547,6 +547,7 @@ static int mtk_drm_probe(struct platform_device *pdev) */ if (comp_type == MTK_DISP_COLOR || comp_type == MTK_DISP_OVL || + comp_type == MTK_DISP_OVL_2L || comp_type == MTK_DISP_RDMA || comp_type == MTK_DSI || comp_type == MTK_DPI) { @@ -669,8 +670,8 @@ static struct platform_driver * const mtk_drm_drivers[] = { &mtk_disp_rdma_driver, &mtk_dpi_driver, &mtk_drm_platform_driver, - &mtk_dsi_driver, &mtk_mipi_tx_driver, + &mtk_dsi_driver, }; static int __init mtk_drm_init(void) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_gem.c b/drivers/gpu/drm/mediatek/mtk_drm_gem.c index ca672f1d140d..b04a3c2b111e 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_gem.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_gem.c @@ -271,7 +271,7 @@ void *mtk_drm_gem_prime_vmap(struct drm_gem_object *obj) pgprot_writecombine(PAGE_KERNEL)); out: - kfree((void *)sgt); + kfree(sgt); return mtk_gem->kvaddr; } @@ -285,5 +285,5 @@ void mtk_drm_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) vunmap(vaddr); mtk_gem->kvaddr = 0; - kfree((void *)mtk_gem->pages); + kfree(mtk_gem->pages); } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index 584a9ecadce6..3b0cc91c7023 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -20,6 +20,12 @@ static const u32 formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, + DRM_FORMAT_BGRX8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, DRM_FORMAT_RGB565, DRM_FORMAT_UYVY, DRM_FORMAT_YUYV, @@ -84,6 +90,7 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, { struct drm_framebuffer *fb = state->fb; struct drm_crtc_state *crtc_state; + int ret; if (!fb) return 0; @@ -91,6 +98,11 @@ static int mtk_plane_atomic_check(struct drm_plane *plane, if (!state->crtc) return 0; + ret = mtk_drm_crtc_plane_check(state->crtc, plane, + to_mtk_plane_state(state)); + if (ret) + return ret; + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); if (IS_ERR(crtc_state)) return PTR_ERR(crtc_state); @@ 
-132,6 +144,7 @@ static void mtk_plane_atomic_update(struct drm_plane *plane, state->pending.y = plane->state->dst.y1; state->pending.width = drm_rect_width(&plane->state->dst); state->pending.height = drm_rect_height(&plane->state->dst); + state->pending.rotation = plane->state->rotation; wmb(); /* Make sure the above parameters are set before update */ state->pending.dirty = true; } @@ -154,7 +167,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { }; int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, - unsigned long possible_crtcs, enum drm_plane_type type) + unsigned long possible_crtcs, enum drm_plane_type type, + unsigned int supported_rotations) { int err; @@ -166,6 +180,14 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, return err; } + if (supported_rotations & ~DRM_MODE_ROTATE_0) { + err = drm_plane_create_rotation_property(plane, + DRM_MODE_ROTATE_0, + supported_rotations); + if (err) + DRM_INFO("Create rotation property failed\n"); + } + drm_plane_helper_add(plane, &mtk_plane_helper_funcs); return 0; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.h b/drivers/gpu/drm/mediatek/mtk_drm_plane.h index 6f842df722c7..760885e35b27 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.h @@ -20,6 +20,7 @@ struct mtk_plane_pending_state { unsigned int y; unsigned int width; unsigned int height; + unsigned int rotation; bool dirty; }; @@ -35,6 +36,7 @@ to_mtk_plane_state(struct drm_plane_state *state) } int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, - unsigned long possible_crtcs, enum drm_plane_type type); + unsigned long possible_crtcs, enum drm_plane_type type, + unsigned int supported_rotations); #endif diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index 224afb666881..e9931bbbe846 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -16,6 +16,7 @@ #include <video/videomode.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> @@ -39,6 +40,7 @@ #define DSI_CON_CTRL 0x10 #define DSI_RESET BIT(0) #define DSI_EN BIT(1) +#define DPHY_RESET BIT(2) #define DSI_MODE_CTRL 0x14 #define MODE (3) @@ -72,6 +74,7 @@ #define DSI_VBP_NL 0x24 #define DSI_VFP_NL 0x28 #define DSI_VACT_NL 0x2C +#define DSI_SIZE_CON 0x38 #define DSI_HSA_WC 0x50 #define DSI_HBP_WC 0x54 #define DSI_HFP_WC 0x58 @@ -125,7 +128,10 @@ #define VM_CMD_EN BIT(0) #define TS_VFP_EN BIT(5) -#define DSI_CMDQ0 0x180 +#define DSI_SHADOW_DEBUG 0x190U +#define FORCE_COMMIT BIT(0) +#define BYPASS_SHADOW BIT(1) + #define CONFIG (0xff << 0) #define SHORT_PACKET 0 #define LONG_PACKET 2 @@ -134,12 +140,6 @@ #define DATA_0 (0xff << 16) #define DATA_1 (0xff << 24) -#define T_LPX 5 -#define T_HS_PREP 6 -#define T_HS_TRAIL 8 -#define T_HS_EXIT 7 -#define T_HS_ZERO 10 - #define NS_TO_CYCLE(n, c) ((n) / (c) + (((n) % (c)) ? 
1 : 0)) #define MTK_DSI_HOST_IS_READ(type) \ @@ -148,8 +148,33 @@ (type == MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM) || \ (type == MIPI_DSI_DCS_READ)) +struct mtk_phy_timing { + u32 lpx; + u32 da_hs_prepare; + u32 da_hs_zero; + u32 da_hs_trail; + + u32 ta_go; + u32 ta_sure; + u32 ta_get; + u32 da_hs_exit; + + u32 clk_hs_zero; + u32 clk_hs_trail; + + u32 clk_hs_prepare; + u32 clk_hs_post; + u32 clk_hs_exit; +}; + struct phy; +struct mtk_dsi_driver_data { + const u32 reg_cmdq_off; + bool has_shadow_ctl; + bool has_size_ctl; +}; + struct mtk_dsi { struct mtk_ddp_comp ddp_comp; struct device *dev; @@ -172,10 +197,12 @@ struct mtk_dsi { enum mipi_dsi_pixel_format format; unsigned int lanes; struct videomode vm; + struct mtk_phy_timing phy_timing; int refcount; bool enabled; u32 irq_data; wait_queue_head_t irq_wait_queue; + const struct mtk_dsi_driver_data *driver_data; }; static inline struct mtk_dsi *encoder_to_dsi(struct drm_encoder *e) @@ -204,17 +231,36 @@ static void mtk_dsi_phy_timconfig(struct mtk_dsi *dsi) { u32 timcon0, timcon1, timcon2, timcon3; u32 ui, cycle_time; + struct mtk_phy_timing *timing = &dsi->phy_timing; + + ui = DIV_ROUND_UP(1000000000, dsi->data_rate); + cycle_time = div_u64(8000000000ULL, dsi->data_rate); + + timing->lpx = NS_TO_CYCLE(60, cycle_time); + timing->da_hs_prepare = NS_TO_CYCLE(50 + 5 * ui, cycle_time); + timing->da_hs_zero = NS_TO_CYCLE(110 + 6 * ui, cycle_time); + timing->da_hs_trail = NS_TO_CYCLE(77 + 4 * ui, cycle_time); + + timing->ta_go = 4 * timing->lpx; + timing->ta_sure = 3 * timing->lpx / 2; + timing->ta_get = 5 * timing->lpx; + timing->da_hs_exit = 2 * timing->lpx; - ui = 1000 / dsi->data_rate + 0x01; - cycle_time = 8000 / dsi->data_rate + 0x01; + timing->clk_hs_zero = NS_TO_CYCLE(336, cycle_time); + timing->clk_hs_trail = NS_TO_CYCLE(100, cycle_time) + 10; - timcon0 = T_LPX | T_HS_PREP << 8 | T_HS_ZERO << 16 | T_HS_TRAIL << 24; - timcon1 = 4 * T_LPX | (3 * T_LPX / 2) << 8 | 5 * T_LPX << 16 | - T_HS_EXIT << 24; - timcon2 = ((NS_TO_CYCLE(0x64, cycle_time) + 0xa) << 24) | - (NS_TO_CYCLE(0x150, cycle_time) << 16); - timcon3 = NS_TO_CYCLE(0x40, cycle_time) | (2 * T_LPX) << 16 | - NS_TO_CYCLE(80 + 52 * ui, cycle_time) << 8; + timing->clk_hs_prepare = NS_TO_CYCLE(64, cycle_time); + timing->clk_hs_post = NS_TO_CYCLE(80 + 52 * ui, cycle_time); + timing->clk_hs_exit = 2 * timing->lpx; + + timcon0 = timing->lpx | timing->da_hs_prepare << 8 | + timing->da_hs_zero << 16 | timing->da_hs_trail << 24; + timcon1 = timing->ta_go | timing->ta_sure << 8 | + timing->ta_get << 16 | timing->da_hs_exit << 24; + timcon2 = 1 << 8 | timing->clk_hs_zero << 16 | + timing->clk_hs_trail << 24; + timcon3 = timing->clk_hs_prepare | timing->clk_hs_post << 8 | + timing->clk_hs_exit << 16; writel(timcon0, dsi->regs + DSI_PHY_TIMECON0); writel(timcon1, dsi->regs + DSI_PHY_TIMECON1); @@ -238,6 +284,12 @@ static void mtk_dsi_reset_engine(struct mtk_dsi *dsi) mtk_dsi_mask(dsi, DSI_CON_CTRL, DSI_RESET, 0); } +static void mtk_dsi_reset_dphy(struct mtk_dsi *dsi) +{ + mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, DPHY_RESET); + mtk_dsi_mask(dsi, DSI_CON_CTRL, DPHY_RESET, 0); +} + static void mtk_dsi_clk_ulp_mode_enter(struct mtk_dsi *dsi) { mtk_dsi_mask(dsi, DSI_PHY_LCCON, LC_HS_TX_EN, 0); @@ -401,7 +453,8 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) u32 horizontal_sync_active_byte; u32 horizontal_backporch_byte; u32 horizontal_frontporch_byte; - u32 dsi_tmp_buf_bpp; + u32 dsi_tmp_buf_bpp, data_phy_cycles; + struct mtk_phy_timing *timing = &dsi->phy_timing; struct videomode *vm 
= &dsi->vm; @@ -415,6 +468,10 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL); writel(vm->vactive, dsi->regs + DSI_VACT_NL); + if (dsi->driver_data->has_size_ctl) + writel(vm->vactive << 16 | vm->hactive, + dsi->regs + DSI_SIZE_CON); + horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10); if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) @@ -424,7 +481,34 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi) horizontal_backporch_byte = ((vm->hback_porch + vm->hsync_len) * dsi_tmp_buf_bpp - 10); - horizontal_frontporch_byte = (vm->hfront_porch * dsi_tmp_buf_bpp - 12); + data_phy_cycles = timing->lpx + timing->da_hs_prepare + + timing->da_hs_zero + timing->da_hs_exit + 2; + + if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) { + if (vm->hfront_porch * dsi_tmp_buf_bpp > + data_phy_cycles * dsi->lanes + 18) { + horizontal_frontporch_byte = vm->hfront_porch * + dsi_tmp_buf_bpp - + data_phy_cycles * + dsi->lanes - 18; + } else { + DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); + horizontal_frontporch_byte = vm->hfront_porch * + dsi_tmp_buf_bpp; + } + } else { + if (vm->hfront_porch * dsi_tmp_buf_bpp > + data_phy_cycles * dsi->lanes + 12) { + horizontal_frontporch_byte = vm->hfront_porch * + dsi_tmp_buf_bpp - + data_phy_cycles * + dsi->lanes - 12; + } else { + DRM_WARN("HFP less than d-phy, FPS will under 60Hz\n"); + horizontal_frontporch_byte = vm->hfront_porch * + dsi_tmp_buf_bpp; + } + } writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC); writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC); @@ -522,10 +606,9 @@ static s32 mtk_dsi_switch_to_cmd_mode(struct mtk_dsi *dsi, u8 irq_flag, u32 t) static int mtk_dsi_poweron(struct mtk_dsi *dsi) { - struct device *dev = dsi->dev; + struct device *dev = dsi->host.dev; int ret; - u64 pixel_clock, total_bits; - u32 htotal, htotal_bits, bit_per_pixel, overhead_cycles, overhead_bits; + u32 bit_per_pixel; if (++dsi->refcount != 1) return 0; @@ -544,24 +627,8 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi) break; } - /** - * htotal_time = htotal * byte_per_pixel / num_lanes - * overhead_time = lpx + hs_prepare + hs_zero + hs_trail + hs_exit - * mipi_ratio = (htotal_time + overhead_time) / htotal_time - * data_rate = pixel_clock * bit_per_pixel * mipi_ratio / num_lanes; - */ - pixel_clock = dsi->vm.pixelclock; - htotal = dsi->vm.hactive + dsi->vm.hback_porch + dsi->vm.hfront_porch + - dsi->vm.hsync_len; - htotal_bits = htotal * bit_per_pixel; - - overhead_cycles = T_LPX + T_HS_PREP + T_HS_ZERO + T_HS_TRAIL + - T_HS_EXIT; - overhead_bits = overhead_cycles * dsi->lanes * 8; - total_bits = htotal_bits + overhead_bits; - - dsi->data_rate = DIV_ROUND_UP_ULL(pixel_clock * total_bits, - htotal * dsi->lanes); + dsi->data_rate = DIV_ROUND_UP_ULL(dsi->vm.pixelclock * bit_per_pixel, + dsi->lanes); ret = clk_set_rate(dsi->hs_clk, dsi->data_rate); if (ret < 0) { @@ -584,10 +651,17 @@ static int mtk_dsi_poweron(struct mtk_dsi *dsi) } mtk_dsi_enable(dsi); + + if (dsi->driver_data->has_shadow_ctl) + writel(FORCE_COMMIT | BYPASS_SHADOW, + dsi->regs + DSI_SHADOW_DEBUG); + mtk_dsi_reset_engine(dsi); mtk_dsi_phy_timconfig(dsi); mtk_dsi_rxtx_control(dsi); + usleep_range(30, 100); + mtk_dsi_reset_dphy(dsi); mtk_dsi_ps_control_vact(dsi); mtk_dsi_set_vm_cmd(dsi); mtk_dsi_config_vdo_timing(dsi); @@ -938,6 +1012,7 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg) const char *tx_buf = msg->tx_buf; u8 config, cmdq_size, cmdq_off, type = 
msg->type; u32 reg_val, cmdq_mask, i; + u32 reg_cmdq_off = dsi->driver_data->reg_cmdq_off; if (MTK_DSI_HOST_IS_READ(type)) config = BTA; @@ -957,9 +1032,11 @@ static void mtk_dsi_cmdq(struct mtk_dsi *dsi, const struct mipi_dsi_msg *msg) } for (i = 0; i < msg->tx_len; i++) - writeb(tx_buf[i], dsi->regs + DSI_CMDQ0 + cmdq_off + i); + mtk_dsi_mask(dsi, (reg_cmdq_off + cmdq_off + i) & (~0x3U), + (0xffUL << (((i + cmdq_off) & 3U) * 8U)), + tx_buf[i] << (((i + cmdq_off) & 3U) * 8U)); - mtk_dsi_mask(dsi, DSI_CMDQ0, cmdq_mask, reg_val); + mtk_dsi_mask(dsi, reg_cmdq_off, cmdq_mask, reg_val); mtk_dsi_mask(dsi, DSI_CMDQ_SIZE, CMDQ_SIZE, cmdq_size); } @@ -1049,12 +1126,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) return ret; } - ret = mipi_dsi_host_register(&dsi->host); - if (ret < 0) { - dev_err(dev, "failed to register DSI host: %d\n", ret); - goto err_ddp_comp_unregister; - } - ret = mtk_dsi_create_conn_enc(drm, dsi); if (ret) { DRM_ERROR("Encoder create failed with %d\n", ret); @@ -1064,8 +1135,6 @@ static int mtk_dsi_bind(struct device *dev, struct device *master, void *data) return 0; err_unregister: - mipi_dsi_host_unregister(&dsi->host); -err_ddp_comp_unregister: mtk_ddp_comp_unregister(drm, &dsi->ddp_comp); return ret; } @@ -1077,7 +1146,6 @@ static void mtk_dsi_unbind(struct device *dev, struct device *master, struct mtk_dsi *dsi = dev_get_drvdata(dev); mtk_dsi_destroy_conn_enc(dsi); - mipi_dsi_host_unregister(&dsi->host); mtk_ddp_comp_unregister(drm, &dsi->ddp_comp); } @@ -1101,31 +1169,38 @@ static int mtk_dsi_probe(struct platform_device *pdev) dsi->host.ops = &mtk_dsi_ops; dsi->host.dev = dev; + ret = mipi_dsi_host_register(&dsi->host); + if (ret < 0) { + dev_err(dev, "failed to register DSI host: %d\n", ret); + return ret; + } ret = drm_of_find_panel_or_bridge(dev->of_node, 0, 0, &dsi->panel, &dsi->bridge); if (ret) - return ret; + goto err_unregister_host; + + dsi->driver_data = of_device_get_match_data(dev); dsi->engine_clk = devm_clk_get(dev, "engine"); if (IS_ERR(dsi->engine_clk)) { ret = PTR_ERR(dsi->engine_clk); dev_err(dev, "Failed to get engine clock: %d\n", ret); - return ret; + goto err_unregister_host; } dsi->digital_clk = devm_clk_get(dev, "digital"); if (IS_ERR(dsi->digital_clk)) { ret = PTR_ERR(dsi->digital_clk); dev_err(dev, "Failed to get digital clock: %d\n", ret); - return ret; + goto err_unregister_host; } dsi->hs_clk = devm_clk_get(dev, "hs"); if (IS_ERR(dsi->hs_clk)) { ret = PTR_ERR(dsi->hs_clk); dev_err(dev, "Failed to get hs clock: %d\n", ret); - return ret; + goto err_unregister_host; } regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1133,33 +1208,35 @@ static int mtk_dsi_probe(struct platform_device *pdev) if (IS_ERR(dsi->regs)) { ret = PTR_ERR(dsi->regs); dev_err(dev, "Failed to ioremap memory: %d\n", ret); - return ret; + goto err_unregister_host; } dsi->phy = devm_phy_get(dev, "dphy"); if (IS_ERR(dsi->phy)) { ret = PTR_ERR(dsi->phy); dev_err(dev, "Failed to get MIPI-DPHY: %d\n", ret); - return ret; + goto err_unregister_host; } comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DSI); if (comp_id < 0) { dev_err(dev, "Failed to identify by alias: %d\n", comp_id); - return comp_id; + ret = comp_id; + goto err_unregister_host; } ret = mtk_ddp_comp_init(dev, dev->of_node, &dsi->ddp_comp, comp_id, &mtk_dsi_funcs); if (ret) { dev_err(dev, "Failed to initialize component: %d\n", ret); - return ret; + goto err_unregister_host; } irq_num = platform_get_irq(pdev, 0); if (irq_num < 0) { - dev_err(&pdev->dev, "failed to 
request dsi irq resource\n"); - return -EPROBE_DEFER; + dev_err(&pdev->dev, "failed to get dsi irq_num: %d\n", irq_num); + ret = irq_num; + goto err_unregister_host; } irq_set_status_flags(irq_num, IRQ_TYPE_LEVEL_LOW); @@ -1167,14 +1244,24 @@ static int mtk_dsi_probe(struct platform_device *pdev) IRQF_TRIGGER_LOW, dev_name(&pdev->dev), dsi); if (ret) { dev_err(&pdev->dev, "failed to request mediatek dsi irq\n"); - return -EPROBE_DEFER; + goto err_unregister_host; } init_waitqueue_head(&dsi->irq_wait_queue); platform_set_drvdata(pdev, dsi); - return component_add(&pdev->dev, &mtk_dsi_component_ops); + ret = component_add(&pdev->dev, &mtk_dsi_component_ops); + if (ret) { + dev_err(&pdev->dev, "failed to add component: %d\n", ret); + goto err_unregister_host; + } + + return 0; + +err_unregister_host: + mipi_dsi_host_unregister(&dsi->host); + return ret; } static int mtk_dsi_remove(struct platform_device *pdev) @@ -1183,13 +1270,32 @@ static int mtk_dsi_remove(struct platform_device *pdev) mtk_output_dsi_disable(dsi); component_del(&pdev->dev, &mtk_dsi_component_ops); + mipi_dsi_host_unregister(&dsi->host); return 0; } +static const struct mtk_dsi_driver_data mt8173_dsi_driver_data = { + .reg_cmdq_off = 0x200, +}; + +static const struct mtk_dsi_driver_data mt2701_dsi_driver_data = { + .reg_cmdq_off = 0x180, +}; + +static const struct mtk_dsi_driver_data mt8183_dsi_driver_data = { + .reg_cmdq_off = 0x200, + .has_shadow_ctl = true, + .has_size_ctl = true, +}; + static const struct of_device_id mtk_dsi_of_match[] = { - { .compatible = "mediatek,mt2701-dsi" }, - { .compatible = "mediatek,mt8173-dsi" }, + { .compatible = "mediatek,mt2701-dsi", + .data = &mt2701_dsi_driver_data }, + { .compatible = "mediatek,mt8173-dsi", + .data = &mt8173_dsi_driver_data }, + { .compatible = "mediatek,mt8183-dsi", + .data = &mt8183_dsi_driver_data }, { }, }; diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index ce91b61364eb..c79b1f855d89 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -23,6 +23,7 @@ #include <sound/hdmi-codec.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_edid.h> #include <drm/drm_print.h> diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c index 1842dc2caae9..e4d34484ecc8 100644 --- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c +++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c @@ -3,292 +3,39 @@ * Copyright (c) 2015 MediaTek Inc. 
*/ -#include <linux/clk.h> -#include <linux/clk-provider.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/module.h> -#include <linux/of_device.h> -#include <linux/platform_device.h> -#include <linux/phy/phy.h> - -#define MIPITX_DSI_CON 0x00 -#define RG_DSI_LDOCORE_EN BIT(0) -#define RG_DSI_CKG_LDOOUT_EN BIT(1) -#define RG_DSI_BCLK_SEL (3 << 2) -#define RG_DSI_LD_IDX_SEL (7 << 4) -#define RG_DSI_PHYCLK_SEL (2 << 8) -#define RG_DSI_DSICLK_FREQ_SEL BIT(10) -#define RG_DSI_LPTX_CLMP_EN BIT(11) - -#define MIPITX_DSI_CLOCK_LANE 0x04 -#define MIPITX_DSI_DATA_LANE0 0x08 -#define MIPITX_DSI_DATA_LANE1 0x0c -#define MIPITX_DSI_DATA_LANE2 0x10 -#define MIPITX_DSI_DATA_LANE3 0x14 -#define RG_DSI_LNTx_LDOOUT_EN BIT(0) -#define RG_DSI_LNTx_CKLANE_EN BIT(1) -#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2) -#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3) -#define RG_DSI_LNTx_LPTX_IMINUS BIT(4) -#define RG_DSI_LNTx_LPCD_IPLUS BIT(5) -#define RG_DSI_LNTx_LPCD_IMINUS BIT(6) -#define RG_DSI_LNTx_RT_CODE (0xf << 8) - -#define MIPITX_DSI_TOP_CON 0x40 -#define RG_DSI_LNT_INTR_EN BIT(0) -#define RG_DSI_LNT_HS_BIAS_EN BIT(1) -#define RG_DSI_LNT_IMP_CAL_EN BIT(2) -#define RG_DSI_LNT_TESTMODE_EN BIT(3) -#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4) -#define RG_DSI_LNT_AIO_SEL (7 << 8) -#define RG_DSI_PAD_TIE_LOW_EN BIT(11) -#define RG_DSI_DEBUG_INPUT_EN BIT(12) -#define RG_DSI_PRESERVE (7 << 13) - -#define MIPITX_DSI_BG_CON 0x44 -#define RG_DSI_BG_CORE_EN BIT(0) -#define RG_DSI_BG_CKEN BIT(1) -#define RG_DSI_BG_DIV (0x3 << 2) -#define RG_DSI_BG_FAST_CHARGE BIT(4) -#define RG_DSI_VOUT_MSK (0x3ffff << 5) -#define RG_DSI_V12_SEL (7 << 5) -#define RG_DSI_V10_SEL (7 << 8) -#define RG_DSI_V072_SEL (7 << 11) -#define RG_DSI_V04_SEL (7 << 14) -#define RG_DSI_V032_SEL (7 << 17) -#define RG_DSI_V02_SEL (7 << 20) -#define RG_DSI_BG_R1_TRIM (0xf << 24) -#define RG_DSI_BG_R2_TRIM (0xf << 28) - -#define MIPITX_DSI_PLL_CON0 0x50 -#define RG_DSI_MPPLL_PLL_EN BIT(0) -#define RG_DSI_MPPLL_DIV_MSK (0x1ff << 1) -#define RG_DSI_MPPLL_PREDIV (3 << 1) -#define RG_DSI_MPPLL_TXDIV0 (3 << 3) -#define RG_DSI_MPPLL_TXDIV1 (3 << 5) -#define RG_DSI_MPPLL_POSDIV (7 << 7) -#define RG_DSI_MPPLL_MONVC_EN BIT(10) -#define RG_DSI_MPPLL_MONREF_EN BIT(11) -#define RG_DSI_MPPLL_VOD_EN BIT(12) - -#define MIPITX_DSI_PLL_CON1 0x54 -#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0) -#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1) -#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2) -#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16) - -#define MIPITX_DSI_PLL_CON2 0x58 - -#define MIPITX_DSI_PLL_TOP 0x64 -#define RG_DSI_MPPLL_PRESERVE (0xff << 8) - -#define MIPITX_DSI_PLL_PWR 0x68 -#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0) -#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1) -#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8) - -#define MIPITX_DSI_SW_CTRL 0x80 -#define SW_CTRL_EN BIT(0) - -#define MIPITX_DSI_SW_CTRL_CON0 0x84 -#define SW_LNTC_LPTX_PRE_OE BIT(0) -#define SW_LNTC_LPTX_OE BIT(1) -#define SW_LNTC_LPTX_P BIT(2) -#define SW_LNTC_LPTX_N BIT(3) -#define SW_LNTC_HSTX_PRE_OE BIT(4) -#define SW_LNTC_HSTX_OE BIT(5) -#define SW_LNTC_HSTX_ZEROCLK BIT(6) -#define SW_LNT0_LPTX_PRE_OE BIT(7) -#define SW_LNT0_LPTX_OE BIT(8) -#define SW_LNT0_LPTX_P BIT(9) -#define SW_LNT0_LPTX_N BIT(10) -#define SW_LNT0_HSTX_PRE_OE BIT(11) -#define SW_LNT0_HSTX_OE BIT(12) -#define SW_LNT0_LPRX_EN BIT(13) -#define SW_LNT1_LPTX_PRE_OE BIT(14) -#define SW_LNT1_LPTX_OE BIT(15) -#define SW_LNT1_LPTX_P BIT(16) -#define SW_LNT1_LPTX_N BIT(17) -#define SW_LNT1_HSTX_PRE_OE BIT(18) -#define SW_LNT1_HSTX_OE BIT(19) -#define SW_LNT2_LPTX_PRE_OE 
BIT(20) -#define SW_LNT2_LPTX_OE BIT(21) -#define SW_LNT2_LPTX_P BIT(22) -#define SW_LNT2_LPTX_N BIT(23) -#define SW_LNT2_HSTX_PRE_OE BIT(24) -#define SW_LNT2_HSTX_OE BIT(25) - -struct mtk_mipitx_data { - const u32 mppll_preserve; -}; - -struct mtk_mipi_tx { - struct device *dev; - void __iomem *regs; - u32 data_rate; - const struct mtk_mipitx_data *driver_data; - struct clk_hw pll_hw; - struct clk *pll; -}; +#include "mtk_mipi_tx.h" -static inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw) +inline struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw) { return container_of(hw, struct mtk_mipi_tx, pll_hw); } -static void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, - u32 bits) +void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, + u32 bits) { u32 temp = readl(mipi_tx->regs + offset); writel(temp & ~bits, mipi_tx->regs + offset); } -static void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, - u32 bits) +void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, + u32 bits) { u32 temp = readl(mipi_tx->regs + offset); writel(temp | bits, mipi_tx->regs + offset); } -static void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, - u32 mask, u32 data) +void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, + u32 mask, u32 data) { u32 temp = readl(mipi_tx->regs + offset); writel((temp & ~mask) | (data & mask), mipi_tx->regs + offset); } -static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw) -{ - struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); - u8 txdiv, txdiv0, txdiv1; - u64 pcw; - - dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate); - - if (mipi_tx->data_rate >= 500000000) { - txdiv = 1; - txdiv0 = 0; - txdiv1 = 0; - } else if (mipi_tx->data_rate >= 250000000) { - txdiv = 2; - txdiv0 = 1; - txdiv1 = 0; - } else if (mipi_tx->data_rate >= 125000000) { - txdiv = 4; - txdiv0 = 2; - txdiv1 = 0; - } else if (mipi_tx->data_rate > 62000000) { - txdiv = 8; - txdiv0 = 2; - txdiv1 = 1; - } else if (mipi_tx->data_rate >= 50000000) { - txdiv = 16; - txdiv0 = 2; - txdiv1 = 2; - } else { - return -EINVAL; - } - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON, - RG_DSI_VOUT_MSK | - RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN, - (4 << 20) | (4 << 17) | (4 << 14) | - (4 << 11) | (4 << 8) | (4 << 5) | - RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN); - - usleep_range(30, 100); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON, - RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN, - (8 << 4) | RG_DSI_LNT_HS_BIAS_EN); - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON, - RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR, - RG_DSI_MPPLL_SDM_PWR_ON | - RG_DSI_MPPLL_SDM_ISO_EN, - RG_DSI_MPPLL_SDM_PWR_ON); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, - RG_DSI_MPPLL_PLL_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0, - RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 | - RG_DSI_MPPLL_PREDIV, - (txdiv0 << 3) | (txdiv1 << 5)); - - /* - * PLL PCW config - * PCW bit 24~30 = integer part of pcw - * PCW bit 0~23 = fractional part of pcw - * pcw = data_Rate*4*txdiv/(Ref_clk*2); - * Post DIV =4, so need data_Rate*4 - * Ref_clk is 26MHz - */ - pcw = div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24, - 26000000); - writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2); - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1, - RG_DSI_MPPLL_SDM_FRA_EN); - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN); - - 
usleep_range(20, 100); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1, - RG_DSI_MPPLL_SDM_SSC_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP, - RG_DSI_MPPLL_PRESERVE, - mipi_tx->driver_data->mppll_preserve); - - return 0; -} - -static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw) -{ - struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); - - dev_dbg(mipi_tx->dev, "unprepare\n"); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, - RG_DSI_MPPLL_PLL_EN); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP, - RG_DSI_MPPLL_PRESERVE, 0); - - mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR, - RG_DSI_MPPLL_SDM_ISO_EN | - RG_DSI_MPPLL_SDM_PWR_ON, - RG_DSI_MPPLL_SDM_ISO_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON, - RG_DSI_LNT_HS_BIAS_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON, - RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON, - RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, - RG_DSI_MPPLL_DIV_MSK); -} - -static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *prate) -{ - return clamp_val(rate, 50000000, 1250000000); -} - -static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) +int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) { struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); @@ -299,37 +46,14 @@ static int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } -static unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) +unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) { struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); return mipi_tx->data_rate; } -static const struct clk_ops mtk_mipi_tx_pll_ops = { - .prepare = mtk_mipi_tx_pll_prepare, - .unprepare = mtk_mipi_tx_pll_unprepare, - .round_rate = mtk_mipi_tx_pll_round_rate, - .set_rate = mtk_mipi_tx_pll_set_rate, - .recalc_rate = mtk_mipi_tx_pll_recalc_rate, -}; - -static int mtk_mipi_tx_power_on_signal(struct phy *phy) -{ - struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); - u32 reg; - - for (reg = MIPITX_DSI_CLOCK_LANE; - reg <= MIPITX_DSI_DATA_LANE3; reg += 4) - mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN); - - mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON, - RG_DSI_PAD_TIE_LOW_EN); - - return 0; -} - static int mtk_mipi_tx_power_on(struct phy *phy) { struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); @@ -341,30 +65,16 @@ static int mtk_mipi_tx_power_on(struct phy *phy) return ret; /* Enable DSI Lane LDO outputs, disable pad tie low */ - mtk_mipi_tx_power_on_signal(phy); - + mipi_tx->driver_data->mipi_tx_enable_signal(phy); return 0; } -static void mtk_mipi_tx_power_off_signal(struct phy *phy) -{ - struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); - u32 reg; - - mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON, - RG_DSI_PAD_TIE_LOW_EN); - - for (reg = MIPITX_DSI_CLOCK_LANE; - reg <= MIPITX_DSI_DATA_LANE3; reg += 4) - mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN); -} - static int mtk_mipi_tx_power_off(struct phy *phy) { struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); /* Enable pad tie low, disable DSI Lane LDO outputs */ - mtk_mipi_tx_power_off_signal(phy); + mipi_tx->driver_data->mipi_tx_disable_signal(phy); /* Disable PLL and power down core */ clk_disable_unprepare(mipi_tx->pll); @@ 
-383,10 +93,9 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct mtk_mipi_tx *mipi_tx; struct resource *mem; - struct clk *ref_clk; const char *ref_clk_name; + struct clk *ref_clk; struct clk_init_data clk_init = { - .ops = &mtk_mipi_tx_pll_ops, .num_parents = 1, .parent_names = (const char * const *)&ref_clk_name, .flags = CLK_SET_RATE_GATE, @@ -400,6 +109,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) return -ENOMEM; mipi_tx->driver_data = of_device_get_match_data(dev); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); mipi_tx->regs = devm_ioremap_resource(dev, mem); if (IS_ERR(mipi_tx->regs)) { @@ -414,6 +124,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) dev_err(dev, "Failed to get reference clock: %d\n", ret); return ret; } + ref_clk_name = __clk_get_name(ref_clk); ret = of_property_read_string(dev->of_node, "clock-output-names", @@ -423,6 +134,8 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) return ret; } + clk_init.ops = mipi_tx->driver_data->mipi_tx_clk_ops; + mipi_tx->pll_hw.init = &clk_init; mipi_tx->pll = devm_clk_register(dev, &mipi_tx->pll_hw); if (IS_ERR(mipi_tx->pll)) { @@ -457,20 +170,14 @@ static int mtk_mipi_tx_remove(struct platform_device *pdev) return 0; } -static const struct mtk_mipitx_data mt2701_mipitx_data = { - .mppll_preserve = (3 << 8) -}; - -static const struct mtk_mipitx_data mt8173_mipitx_data = { - .mppll_preserve = (0 << 8) -}; - static const struct of_device_id mtk_mipi_tx_match[] = { { .compatible = "mediatek,mt2701-mipi-tx", .data = &mt2701_mipitx_data }, { .compatible = "mediatek,mt8173-mipi-tx", .data = &mt8173_mipitx_data }, - {}, + { .compatible = "mediatek,mt8183-mipi-tx", + .data = &mt8183_mipitx_data }, + { }, }; struct platform_driver mtk_mipi_tx_driver = { @@ -481,3 +188,4 @@ struct platform_driver mtk_mipi_tx_driver = { .of_match_table = mtk_mipi_tx_match, }, }; + diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.h b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h new file mode 100644 index 000000000000..413f35d86219 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: Jitao Shi <jitao.shi@mediatek.com> + */ + +#ifndef _MTK_MIPI_TX_H +#define _MTK_MIPI_TX_H + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/phy/phy.h> + +struct mtk_mipitx_data { + const u32 mppll_preserve; + const struct clk_ops *mipi_tx_clk_ops; + void (*mipi_tx_enable_signal)(struct phy *phy); + void (*mipi_tx_disable_signal)(struct phy *phy); +}; + +struct mtk_mipi_tx { + struct device *dev; + void __iomem *regs; + u32 data_rate; + const struct mtk_mipitx_data *driver_data; + struct clk_hw pll_hw; + struct clk *pll; +}; + +struct mtk_mipi_tx *mtk_mipi_tx_from_clk_hw(struct clk_hw *hw); +void mtk_mipi_tx_clear_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits); +void mtk_mipi_tx_set_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 bits); +void mtk_mipi_tx_update_bits(struct mtk_mipi_tx *mipi_tx, u32 offset, u32 mask, + u32 data); +int mtk_mipi_tx_pll_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate); +unsigned long mtk_mipi_tx_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate); + +extern const struct mtk_mipitx_data mt2701_mipitx_data; +extern const struct mtk_mipitx_data mt8173_mipitx_data; +extern const struct mtk_mipitx_data mt8183_mipitx_data; + +#endif diff --git a/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c new file mode 100644 index 000000000000..f18db14d8b63 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_mt8173_mipi_tx.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. + * Author: jitao.shi <jitao.shi@mediatek.com> + */ + +#include "mtk_mipi_tx.h" + +#define MIPITX_DSI_CON 0x00 +#define RG_DSI_LDOCORE_EN BIT(0) +#define RG_DSI_CKG_LDOOUT_EN BIT(1) +#define RG_DSI_BCLK_SEL (3 << 2) +#define RG_DSI_LD_IDX_SEL (7 << 4) +#define RG_DSI_PHYCLK_SEL (2 << 8) +#define RG_DSI_DSICLK_FREQ_SEL BIT(10) +#define RG_DSI_LPTX_CLMP_EN BIT(11) + +#define MIPITX_DSI_CLOCK_LANE 0x04 +#define MIPITX_DSI_DATA_LANE0 0x08 +#define MIPITX_DSI_DATA_LANE1 0x0c +#define MIPITX_DSI_DATA_LANE2 0x10 +#define MIPITX_DSI_DATA_LANE3 0x14 +#define RG_DSI_LNTx_LDOOUT_EN BIT(0) +#define RG_DSI_LNTx_CKLANE_EN BIT(1) +#define RG_DSI_LNTx_LPTX_IPLUS1 BIT(2) +#define RG_DSI_LNTx_LPTX_IPLUS2 BIT(3) +#define RG_DSI_LNTx_LPTX_IMINUS BIT(4) +#define RG_DSI_LNTx_LPCD_IPLUS BIT(5) +#define RG_DSI_LNTx_LPCD_IMINUS BIT(6) +#define RG_DSI_LNTx_RT_CODE (0xf << 8) + +#define MIPITX_DSI_TOP_CON 0x40 +#define RG_DSI_LNT_INTR_EN BIT(0) +#define RG_DSI_LNT_HS_BIAS_EN BIT(1) +#define RG_DSI_LNT_IMP_CAL_EN BIT(2) +#define RG_DSI_LNT_TESTMODE_EN BIT(3) +#define RG_DSI_LNT_IMP_CAL_CODE (0xf << 4) +#define RG_DSI_LNT_AIO_SEL (7 << 8) +#define RG_DSI_PAD_TIE_LOW_EN BIT(11) +#define RG_DSI_DEBUG_INPUT_EN BIT(12) +#define RG_DSI_PRESERVE (7 << 13) + +#define MIPITX_DSI_BG_CON 0x44 +#define RG_DSI_BG_CORE_EN BIT(0) +#define RG_DSI_BG_CKEN BIT(1) +#define RG_DSI_BG_DIV (0x3 << 2) +#define RG_DSI_BG_FAST_CHARGE BIT(4) +#define RG_DSI_VOUT_MSK (0x3ffff << 5) +#define RG_DSI_V12_SEL (7 << 5) +#define RG_DSI_V10_SEL (7 << 8) +#define RG_DSI_V072_SEL (7 << 11) +#define RG_DSI_V04_SEL (7 << 14) +#define RG_DSI_V032_SEL (7 << 17) +#define RG_DSI_V02_SEL (7 << 20) +#define RG_DSI_BG_R1_TRIM (0xf << 24) +#define RG_DSI_BG_R2_TRIM (0xf << 28) + +#define MIPITX_DSI_PLL_CON0 0x50 +#define RG_DSI_MPPLL_PLL_EN BIT(0) +#define 
RG_DSI_MPPLL_DIV_MSK (0x1ff << 1) +#define RG_DSI_MPPLL_PREDIV (3 << 1) +#define RG_DSI_MPPLL_TXDIV0 (3 << 3) +#define RG_DSI_MPPLL_TXDIV1 (3 << 5) +#define RG_DSI_MPPLL_POSDIV (7 << 7) +#define RG_DSI_MPPLL_MONVC_EN BIT(10) +#define RG_DSI_MPPLL_MONREF_EN BIT(11) +#define RG_DSI_MPPLL_VOD_EN BIT(12) + +#define MIPITX_DSI_PLL_CON1 0x54 +#define RG_DSI_MPPLL_SDM_FRA_EN BIT(0) +#define RG_DSI_MPPLL_SDM_SSC_PH_INIT BIT(1) +#define RG_DSI_MPPLL_SDM_SSC_EN BIT(2) +#define RG_DSI_MPPLL_SDM_SSC_PRD (0xffff << 16) + +#define MIPITX_DSI_PLL_CON2 0x58 + +#define MIPITX_DSI_PLL_TOP 0x64 +#define RG_DSI_MPPLL_PRESERVE (0xff << 8) + +#define MIPITX_DSI_PLL_PWR 0x68 +#define RG_DSI_MPPLL_SDM_PWR_ON BIT(0) +#define RG_DSI_MPPLL_SDM_ISO_EN BIT(1) +#define RG_DSI_MPPLL_SDM_PWR_ACK BIT(8) + +#define MIPITX_DSI_SW_CTRL 0x80 +#define SW_CTRL_EN BIT(0) + +#define MIPITX_DSI_SW_CTRL_CON0 0x84 +#define SW_LNTC_LPTX_PRE_OE BIT(0) +#define SW_LNTC_LPTX_OE BIT(1) +#define SW_LNTC_LPTX_P BIT(2) +#define SW_LNTC_LPTX_N BIT(3) +#define SW_LNTC_HSTX_PRE_OE BIT(4) +#define SW_LNTC_HSTX_OE BIT(5) +#define SW_LNTC_HSTX_ZEROCLK BIT(6) +#define SW_LNT0_LPTX_PRE_OE BIT(7) +#define SW_LNT0_LPTX_OE BIT(8) +#define SW_LNT0_LPTX_P BIT(9) +#define SW_LNT0_LPTX_N BIT(10) +#define SW_LNT0_HSTX_PRE_OE BIT(11) +#define SW_LNT0_HSTX_OE BIT(12) +#define SW_LNT0_LPRX_EN BIT(13) +#define SW_LNT1_LPTX_PRE_OE BIT(14) +#define SW_LNT1_LPTX_OE BIT(15) +#define SW_LNT1_LPTX_P BIT(16) +#define SW_LNT1_LPTX_N BIT(17) +#define SW_LNT1_HSTX_PRE_OE BIT(18) +#define SW_LNT1_HSTX_OE BIT(19) +#define SW_LNT2_LPTX_PRE_OE BIT(20) +#define SW_LNT2_LPTX_OE BIT(21) +#define SW_LNT2_LPTX_P BIT(22) +#define SW_LNT2_LPTX_N BIT(23) +#define SW_LNT2_HSTX_PRE_OE BIT(24) +#define SW_LNT2_HSTX_OE BIT(25) + +static int mtk_mipi_tx_pll_prepare(struct clk_hw *hw) +{ + struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); + u8 txdiv, txdiv0, txdiv1; + u64 pcw; + + dev_dbg(mipi_tx->dev, "prepare: %u Hz\n", mipi_tx->data_rate); + + if (mipi_tx->data_rate >= 500000000) { + txdiv = 1; + txdiv0 = 0; + txdiv1 = 0; + } else if (mipi_tx->data_rate >= 250000000) { + txdiv = 2; + txdiv0 = 1; + txdiv1 = 0; + } else if (mipi_tx->data_rate >= 125000000) { + txdiv = 4; + txdiv0 = 2; + txdiv1 = 0; + } else if (mipi_tx->data_rate > 62000000) { + txdiv = 8; + txdiv0 = 2; + txdiv1 = 1; + } else if (mipi_tx->data_rate >= 50000000) { + txdiv = 16; + txdiv0 = 2; + txdiv1 = 2; + } else { + return -EINVAL; + } + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_BG_CON, + RG_DSI_VOUT_MSK | + RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN, + (4 << 20) | (4 << 17) | (4 << 14) | + (4 << 11) | (4 << 8) | (4 << 5) | + RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN); + + usleep_range(30, 100); + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_TOP_CON, + RG_DSI_LNT_IMP_CAL_CODE | RG_DSI_LNT_HS_BIAS_EN, + (8 << 4) | RG_DSI_LNT_HS_BIAS_EN); + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_CON, + RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN); + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR, + RG_DSI_MPPLL_SDM_PWR_ON | + RG_DSI_MPPLL_SDM_ISO_EN, + RG_DSI_MPPLL_SDM_PWR_ON); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, + RG_DSI_MPPLL_PLL_EN); + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_CON0, + RG_DSI_MPPLL_TXDIV0 | RG_DSI_MPPLL_TXDIV1 | + RG_DSI_MPPLL_PREDIV, + (txdiv0 << 3) | (txdiv1 << 5)); + + /* + * PLL PCW config + * PCW bit 24~30 = integer part of pcw + * PCW bit 0~23 = fractional part of pcw + * pcw = data_Rate*4*txdiv/(Ref_clk*2); + * Post DIV =4, so need data_Rate*4 + * Ref_clk is 26MHz + */ + pcw = 
div_u64(((u64)mipi_tx->data_rate * 2 * txdiv) << 24, + 26000000); + writel(pcw, mipi_tx->regs + MIPITX_DSI_PLL_CON2); + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON1, + RG_DSI_MPPLL_SDM_FRA_EN); + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_PLL_CON0, RG_DSI_MPPLL_PLL_EN); + + usleep_range(20, 100); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON1, + RG_DSI_MPPLL_SDM_SSC_EN); + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP, + RG_DSI_MPPLL_PRESERVE, + mipi_tx->driver_data->mppll_preserve); + + return 0; +} + +static void mtk_mipi_tx_pll_unprepare(struct clk_hw *hw) +{ + struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); + + dev_dbg(mipi_tx->dev, "unprepare\n"); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, + RG_DSI_MPPLL_PLL_EN); + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_TOP, + RG_DSI_MPPLL_PRESERVE, 0); + + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_DSI_PLL_PWR, + RG_DSI_MPPLL_SDM_ISO_EN | + RG_DSI_MPPLL_SDM_PWR_ON, + RG_DSI_MPPLL_SDM_ISO_EN); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON, + RG_DSI_LNT_HS_BIAS_EN); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_CON, + RG_DSI_CKG_LDOOUT_EN | RG_DSI_LDOCORE_EN); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_BG_CON, + RG_DSI_BG_CKEN | RG_DSI_BG_CORE_EN); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_PLL_CON0, + RG_DSI_MPPLL_DIV_MSK); +} + +static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + return clamp_val(rate, 50000000, 1250000000); +} + +static const struct clk_ops mtk_mipi_tx_pll_ops = { + .prepare = mtk_mipi_tx_pll_prepare, + .unprepare = mtk_mipi_tx_pll_unprepare, + .round_rate = mtk_mipi_tx_pll_round_rate, + .set_rate = mtk_mipi_tx_pll_set_rate, + .recalc_rate = mtk_mipi_tx_pll_recalc_rate, +}; + +static void mtk_mipi_tx_power_on_signal(struct phy *phy) +{ + struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); + u32 reg; + + for (reg = MIPITX_DSI_CLOCK_LANE; + reg <= MIPITX_DSI_DATA_LANE3; reg += 4) + mtk_mipi_tx_set_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_DSI_TOP_CON, + RG_DSI_PAD_TIE_LOW_EN); +} + +static void mtk_mipi_tx_power_off_signal(struct phy *phy) +{ + struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); + u32 reg; + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_DSI_TOP_CON, + RG_DSI_PAD_TIE_LOW_EN); + + for (reg = MIPITX_DSI_CLOCK_LANE; + reg <= MIPITX_DSI_DATA_LANE3; reg += 4) + mtk_mipi_tx_clear_bits(mipi_tx, reg, RG_DSI_LNTx_LDOOUT_EN); +} + +const struct mtk_mipitx_data mt2701_mipitx_data = { + .mppll_preserve = (3 << 8), + .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops, + .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal, + .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal, +}; + +const struct mtk_mipitx_data mt8173_mipitx_data = { + .mppll_preserve = (0 << 8), + .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops, + .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal, + .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c new file mode 100644 index 000000000000..91f08a351fd0 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_mt8183_mipi_tx.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2019 MediaTek Inc. 
+ * Author: jitao.shi <jitao.shi@mediatek.com> + */ + +#include "mtk_mipi_tx.h" + +#define MIPITX_LANE_CON 0x000c +#define RG_DSI_CPHY_T1DRV_EN BIT(0) +#define RG_DSI_ANA_CK_SEL BIT(1) +#define RG_DSI_PHY_CK_SEL BIT(2) +#define RG_DSI_CPHY_EN BIT(3) +#define RG_DSI_PHYCK_INV_EN BIT(4) +#define RG_DSI_PWR04_EN BIT(5) +#define RG_DSI_BG_LPF_EN BIT(6) +#define RG_DSI_BG_CORE_EN BIT(7) +#define RG_DSI_PAD_TIEL_SEL BIT(8) + +#define MIPITX_PLL_PWR 0x0028 +#define MIPITX_PLL_CON0 0x002c +#define MIPITX_PLL_CON1 0x0030 +#define MIPITX_PLL_CON2 0x0034 +#define MIPITX_PLL_CON3 0x0038 +#define MIPITX_PLL_CON4 0x003c +#define RG_DSI_PLL_IBIAS (3 << 10) + +#define MIPITX_D2_SW_CTL_EN 0x0144 +#define MIPITX_D0_SW_CTL_EN 0x0244 +#define MIPITX_CK_CKMODE_EN 0x0328 +#define DSI_CK_CKMODE_EN BIT(0) +#define MIPITX_CK_SW_CTL_EN 0x0344 +#define MIPITX_D1_SW_CTL_EN 0x0444 +#define MIPITX_D3_SW_CTL_EN 0x0544 +#define DSI_SW_CTL_EN BIT(0) +#define AD_DSI_PLL_SDM_PWR_ON BIT(0) +#define AD_DSI_PLL_SDM_ISO_EN BIT(1) + +#define RG_DSI_PLL_EN BIT(4) +#define RG_DSI_PLL_POSDIV (0x7 << 8) + +static int mtk_mipi_tx_pll_enable(struct clk_hw *hw) +{ + struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); + unsigned int txdiv, txdiv0; + u64 pcw; + + dev_dbg(mipi_tx->dev, "enable: %u bps\n", mipi_tx->data_rate); + + if (mipi_tx->data_rate >= 2000000000) { + txdiv = 1; + txdiv0 = 0; + } else if (mipi_tx->data_rate >= 1000000000) { + txdiv = 2; + txdiv0 = 1; + } else if (mipi_tx->data_rate >= 500000000) { + txdiv = 4; + txdiv0 = 2; + } else if (mipi_tx->data_rate > 250000000) { + txdiv = 8; + txdiv0 = 3; + } else if (mipi_tx->data_rate >= 125000000) { + txdiv = 16; + txdiv0 = 4; + } else { + return -EINVAL; + } + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON4, RG_DSI_PLL_IBIAS); + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON); + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN); + udelay(1); + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN); + pcw = div_u64(((u64)mipi_tx->data_rate * txdiv) << 24, 26000000); + writel(pcw, mipi_tx->regs + MIPITX_PLL_CON0); + mtk_mipi_tx_update_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_POSDIV, + txdiv0 << 8); + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN); + + return 0; +} + +static void mtk_mipi_tx_pll_disable(struct clk_hw *hw) +{ + struct mtk_mipi_tx *mipi_tx = mtk_mipi_tx_from_clk_hw(hw); + + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_CON1, RG_DSI_PLL_EN); + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_ISO_EN); + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_PLL_PWR, AD_DSI_PLL_SDM_PWR_ON); +} + +static long mtk_mipi_tx_pll_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) +{ + return clamp_val(rate, 50000000, 1600000000); +} + +static const struct clk_ops mtk_mipi_tx_pll_ops = { + .enable = mtk_mipi_tx_pll_enable, + .disable = mtk_mipi_tx_pll_disable, + .round_rate = mtk_mipi_tx_pll_round_rate, + .set_rate = mtk_mipi_tx_pll_set_rate, + .recalc_rate = mtk_mipi_tx_pll_recalc_rate, +}; + +static void mtk_mipi_tx_power_on_signal(struct phy *phy) +{ + struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); + + /* BG_LPF_EN / BG_CORE_EN */ + writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN, + mipi_tx->regs + MIPITX_LANE_CON); + usleep_range(30, 100); + writel(RG_DSI_BG_CORE_EN | RG_DSI_BG_LPF_EN, + mipi_tx->regs + MIPITX_LANE_CON); + + /* Switch OFF each Lane */ + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_clear_bits(mipi_tx, 
MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_clear_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN); + + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_CKMODE_EN, DSI_CK_CKMODE_EN); +} + +static void mtk_mipi_tx_power_off_signal(struct phy *phy) +{ + struct mtk_mipi_tx *mipi_tx = phy_get_drvdata(phy); + + /* Switch ON each Lane */ + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D0_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D1_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D2_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_D3_SW_CTL_EN, DSI_SW_CTL_EN); + mtk_mipi_tx_set_bits(mipi_tx, MIPITX_CK_SW_CTL_EN, DSI_SW_CTL_EN); + + writel(RG_DSI_PAD_TIEL_SEL | RG_DSI_BG_CORE_EN, + mipi_tx->regs + MIPITX_LANE_CON); + writel(RG_DSI_PAD_TIEL_SEL, mipi_tx->regs + MIPITX_LANE_CON); +} + +const struct mtk_mipitx_data mt8183_mipitx_data = { + .mipi_tx_clk_ops = &mtk_mipi_tx_pll_ops, + .mipi_tx_enable_signal = mtk_mipi_tx_power_on_signal, + .mipi_tx_disable_signal = mtk_mipi_tx_power_off_signal, +}; diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index a24f8dec5adc..397c33182f4f 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -372,6 +372,33 @@ static const struct component_master_ops meson_drv_master_ops = { .unbind = meson_drv_unbind, }; +static int __maybe_unused meson_drv_pm_suspend(struct device *dev) +{ + struct meson_drm *priv = dev_get_drvdata(dev); + + if (!priv) + return 0; + + return drm_mode_config_helper_suspend(priv->drm); +} + +static int __maybe_unused meson_drv_pm_resume(struct device *dev) +{ + struct meson_drm *priv = dev_get_drvdata(dev); + + if (!priv) + return 0; + + meson_vpu_init(priv); + meson_venc_init(priv); + meson_vpp_init(priv); + meson_viu_init(priv); + + drm_mode_config_helper_resume(priv->drm); + + return 0; +} + static int compare_of(struct device *dev, void *data) { DRM_DEBUG_DRIVER("Comparing of node %pOF with %pOF\n", @@ -467,11 +494,16 @@ static const struct of_device_id dt_match[] = { }; MODULE_DEVICE_TABLE(of, dt_match); +static const struct dev_pm_ops meson_drv_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(meson_drv_pm_suspend, meson_drv_pm_resume) +}; + static struct platform_driver meson_drm_platform_driver = { .probe = meson_drv_probe, .driver = { .name = "meson-drm", .of_match_table = dt_match, + .pm = &meson_drv_pm_ops, }, }; diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 68bbd987147b..3bb7ffe5fc39 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -802,6 +802,47 @@ static bool meson_hdmi_connector_is_available(struct device *dev) return false; } +static void meson_dw_hdmi_init(struct meson_dw_hdmi *meson_dw_hdmi) +{ + struct meson_drm *priv = meson_dw_hdmi->priv; + + /* Enable clocks */ + regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100); + + /* Bring HDMITX MEM output of power down */ + regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0); + + /* Reset HDMITX APB & TX & PHY */ + reset_control_reset(meson_dw_hdmi->hdmitx_apb); + reset_control_reset(meson_dw_hdmi->hdmitx_ctrl); + reset_control_reset(meson_dw_hdmi->hdmitx_phy); + + /* Enable APB3 fail on error */ + if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { + writel_bits_relaxed(BIT(15), BIT(15), + meson_dw_hdmi->hdmitx + 
HDMITX_TOP_CTRL_REG); + writel_bits_relaxed(BIT(15), BIT(15), + meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG); + } + + /* Bring out of reset */ + meson_dw_hdmi->data->top_write(meson_dw_hdmi, + HDMITX_TOP_SW_RESET, 0); + + msleep(20); + + meson_dw_hdmi->data->top_write(meson_dw_hdmi, + HDMITX_TOP_CLK_CNTL, 0xff); + + /* Enable HDMI-TX Interrupt */ + meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, + HDMITX_TOP_INTR_CORE); + + meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN, + HDMITX_TOP_INTR_CORE); + +} + static int meson_dw_hdmi_bind(struct device *dev, struct device *master, void *data) { @@ -925,40 +966,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, DRM_DEBUG_DRIVER("encoder initialized\n"); - /* Enable clocks */ - regmap_update_bits(priv->hhi, HHI_HDMI_CLK_CNTL, 0xffff, 0x100); - - /* Bring HDMITX MEM output of power down */ - regmap_update_bits(priv->hhi, HHI_MEM_PD_REG0, 0xff << 8, 0); - - /* Reset HDMITX APB & TX & PHY */ - reset_control_reset(meson_dw_hdmi->hdmitx_apb); - reset_control_reset(meson_dw_hdmi->hdmitx_ctrl); - reset_control_reset(meson_dw_hdmi->hdmitx_phy); - - /* Enable APB3 fail on error */ - if (!meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { - writel_bits_relaxed(BIT(15), BIT(15), - meson_dw_hdmi->hdmitx + HDMITX_TOP_CTRL_REG); - writel_bits_relaxed(BIT(15), BIT(15), - meson_dw_hdmi->hdmitx + HDMITX_DWC_CTRL_REG); - } - - /* Bring out of reset */ - meson_dw_hdmi->data->top_write(meson_dw_hdmi, - HDMITX_TOP_SW_RESET, 0); - - msleep(20); - - meson_dw_hdmi->data->top_write(meson_dw_hdmi, - HDMITX_TOP_CLK_CNTL, 0xff); - - /* Enable HDMI-TX Interrupt */ - meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_STAT_CLR, - HDMITX_TOP_INTR_CORE); - - meson_dw_hdmi->data->top_write(meson_dw_hdmi, HDMITX_TOP_INTR_MASKN, - HDMITX_TOP_INTR_CORE); + meson_dw_hdmi_init(meson_dw_hdmi); /* Bridge / Connector */ @@ -969,6 +977,11 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24; dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709; + if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") || + dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") || + dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-g12a-dw-hdmi")) + dw_plat_data->use_drm_infoframe = true; + platform_set_drvdata(pdev, meson_dw_hdmi); meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder, @@ -994,6 +1007,34 @@ static const struct component_ops meson_dw_hdmi_ops = { .unbind = meson_dw_hdmi_unbind, }; +static int __maybe_unused meson_dw_hdmi_pm_suspend(struct device *dev) +{ + struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev); + + if (!meson_dw_hdmi) + return 0; + + /* Reset TOP */ + meson_dw_hdmi->data->top_write(meson_dw_hdmi, + HDMITX_TOP_SW_RESET, 0); + + return 0; +} + +static int __maybe_unused meson_dw_hdmi_pm_resume(struct device *dev) +{ + struct meson_dw_hdmi *meson_dw_hdmi = dev_get_drvdata(dev); + + if (!meson_dw_hdmi) + return 0; + + meson_dw_hdmi_init(meson_dw_hdmi); + + dw_hdmi_resume(meson_dw_hdmi->hdmi); + + return 0; +} + static int meson_dw_hdmi_probe(struct platform_device *pdev) { return component_add(&pdev->dev, &meson_dw_hdmi_ops); @@ -1006,6 +1047,11 @@ static int meson_dw_hdmi_remove(struct platform_device *pdev) return 0; } +static const struct dev_pm_ops meson_dw_hdmi_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(meson_dw_hdmi_pm_suspend, + meson_dw_hdmi_pm_resume) +}; + static const struct 
of_device_id meson_dw_hdmi_of_table[] = { { .compatible = "amlogic,meson-gxbb-dw-hdmi", .data = &meson_dw_hdmi_gx_data }, @@ -1025,6 +1071,7 @@ static struct platform_driver meson_dw_hdmi_platform_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = meson_dw_hdmi_of_table, + .pm = &meson_dw_hdmi_pm_ops, }, }; module_platform_driver(meson_dw_hdmi_platform_driver); diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c index ac491a781952..f690793ae2d5 100644 --- a/drivers/gpu/drm/meson/meson_vclk.c +++ b/drivers/gpu/drm/meson/meson_vclk.c @@ -638,13 +638,18 @@ static bool meson_hdmi_pll_validate_params(struct meson_drm *priv, if (frac >= HDMI_FRAC_MAX_GXBB) return false; } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXM) || - meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL) || - meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { + meson_vpu_is_compatible(priv, VPU_COMPATIBLE_GXL)) { /* Empiric supported min/max dividers */ if (m < 106 || m > 247) return false; if (frac >= HDMI_FRAC_MAX_GXL) return false; + } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) { + /* Empiric supported min/max dividers */ + if (m < 106 || m > 247) + return false; + if (frac >= HDMI_FRAC_MAX_G12A) + return false; } return true; diff --git a/drivers/gpu/drm/mgag200/Kconfig b/drivers/gpu/drm/mgag200/Kconfig index 76fee0fbdcae..aed11f4f4c55 100644 --- a/drivers/gpu/drm/mgag200/Kconfig +++ b/drivers/gpu/drm/mgag200/Kconfig @@ -4,6 +4,8 @@ config DRM_MGAG200 depends on DRM && PCI && MMU select DRM_KMS_HELPER select DRM_VRAM_HELPER + select DRM_TTM + select DRM_TTM_HELPER help This is a KMS driver for the MGA G200 server chips, it does not support the original MGA G200 or any of the desktop diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c index 289ce3e29032..79711dbb5b03 100644 --- a/drivers/gpu/drm/mgag200/mgag200_cursor.c +++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c @@ -12,35 +12,10 @@ static bool warn_transparent = true; static bool warn_palette = true; -/* - Hide the cursor off screen. 
We can't disable the cursor hardware because it - takes too long to re-activate and causes momentary corruption -*/ -static void mga_hide_cursor(struct mga_device *mdev) -{ - WREG8(MGA_CURPOSXL, 0); - WREG8(MGA_CURPOSXH, 0); - if (mdev->cursor.pixels_current) - drm_gem_vram_unpin(mdev->cursor.pixels_current); - mdev->cursor.pixels_current = NULL; -} - -int mga_crtc_cursor_set(struct drm_crtc *crtc, - struct drm_file *file_priv, - uint32_t handle, - uint32_t width, - uint32_t height) +static int mgag200_cursor_update(struct mga_device *mdev, void *dst, void *src, + unsigned int width, unsigned int height) { - struct drm_device *dev = crtc->dev; - struct mga_device *mdev = (struct mga_device *)dev->dev_private; - struct drm_gem_vram_object *pixels_1 = mdev->cursor.pixels_1; - struct drm_gem_vram_object *pixels_2 = mdev->cursor.pixels_2; - struct drm_gem_vram_object *pixels_current = mdev->cursor.pixels_current; - struct drm_gem_vram_object *pixels_next; - struct drm_gem_object *obj; - struct drm_gem_vram_object *gbo = NULL; - int ret = 0; - u8 *src, *dst; + struct drm_device *dev = mdev->dev; unsigned int i, row, col; uint32_t colour_set[16]; uint32_t *next_space = &colour_set[0]; @@ -48,79 +23,9 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, uint32_t this_colour; bool found = false; int colour_count = 0; - s64 gpu_addr; - u64 dst_gpu; u8 reg_index; u8 this_row[48]; - if (!pixels_1 || !pixels_2) { - WREG8(MGA_CURPOSXL, 0); - WREG8(MGA_CURPOSXH, 0); - return -ENOTSUPP; /* Didn't allocate space for cursors */ - } - - if (WARN_ON(pixels_current && - pixels_1 != pixels_current && - pixels_2 != pixels_current)) { - return -ENOTSUPP; /* inconsistent state */ - } - - if (!handle || !file_priv) { - mga_hide_cursor(mdev); - return 0; - } - - if (width != 64 || height != 64) { - WREG8(MGA_CURPOSXL, 0); - WREG8(MGA_CURPOSXH, 0); - return -EINVAL; - } - - if (pixels_current == pixels_1) - pixels_next = pixels_2; - else - pixels_next = pixels_1; - - obj = drm_gem_object_lookup(file_priv, handle); - if (!obj) - return -ENOENT; - gbo = drm_gem_vram_of_gem(obj); - ret = drm_gem_vram_pin(gbo, 0); - if (ret) { - dev_err(&dev->pdev->dev, "failed to lock user bo\n"); - goto err_drm_gem_object_put_unlocked; - } - src = drm_gem_vram_kmap(gbo, true, NULL); - if (IS_ERR(src)) { - ret = PTR_ERR(src); - dev_err(&dev->pdev->dev, - "failed to kmap user buffer updates\n"); - goto err_drm_gem_vram_unpin_src; - } - - /* Pin and map up-coming buffer to write colour indices */ - ret = drm_gem_vram_pin(pixels_next, DRM_GEM_VRAM_PL_FLAG_VRAM); - if (ret) { - dev_err(&dev->pdev->dev, - "failed to pin cursor buffer: %d\n", ret); - goto err_drm_gem_vram_kunmap_src; - } - dst = drm_gem_vram_kmap(pixels_next, true, NULL); - if (IS_ERR(dst)) { - ret = PTR_ERR(dst); - dev_err(&dev->pdev->dev, - "failed to kmap cursor updates: %d\n", ret); - goto err_drm_gem_vram_unpin_dst; - } - gpu_addr = drm_gem_vram_offset(pixels_next); - if (gpu_addr < 0) { - ret = (int)gpu_addr; - dev_err(&dev->pdev->dev, - "failed to get cursor scanout address: %d\n", ret); - goto err_drm_gem_vram_kunmap_dst; - } - dst_gpu = (u64)gpu_addr; - memset(&colour_set[0], 0, sizeof(uint32_t)*16); /* width*height*4 = 16384 */ for (i = 0; i < 16384; i += 4) { @@ -133,8 +38,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n"); warn_transparent = false; /* Only tell the user once. 
*/ } - ret = -EINVAL; - goto err_drm_gem_vram_kunmap_dst; + return -EINVAL; } /* Don't need to store transparent pixels as colours */ if (this_colour>>24 == 0x0) @@ -155,8 +59,7 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, dev_info(&dev->pdev->dev, "Not enabling hardware cursor.\n"); warn_palette = false; /* Only tell the user once. */ } - ret = -EINVAL; - goto err_drm_gem_vram_kunmap_dst; + return -EINVAL; } *next_space = this_colour; next_space++; @@ -200,54 +103,218 @@ int mga_crtc_cursor_set(struct drm_crtc *crtc, memcpy_toio(dst + row*48, &this_row[0], 48); } + return 0; +} + +static void mgag200_cursor_set_base(struct mga_device *mdev, u64 address) +{ + u8 addrl = (address >> 10) & 0xff; + u8 addrh = (address >> 18) & 0x3f; + /* Program gpu address of cursor buffer */ - WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, (u8)((dst_gpu>>10) & 0xff)); - WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, (u8)((dst_gpu>>18) & 0x3f)); + WREG_DAC(MGA1064_CURSOR_BASE_ADR_LOW, addrl); + WREG_DAC(MGA1064_CURSOR_BASE_ADR_HI, addrh); +} + +static int mgag200_show_cursor(struct mga_device *mdev, void *src, + unsigned int width, unsigned int height) +{ + struct drm_device *dev = mdev->dev; + struct drm_gem_vram_object *gbo; + void *dst; + s64 off; + int ret; + + gbo = mdev->cursor.gbo[mdev->cursor.next_index]; + if (!gbo) { + WREG8(MGA_CURPOSXL, 0); + WREG8(MGA_CURPOSXH, 0); + return -ENOTSUPP; /* Didn't allocate space for cursors */ + } + dst = drm_gem_vram_vmap(gbo); + if (IS_ERR(dst)) { + ret = PTR_ERR(dst); + dev_err(&dev->pdev->dev, + "failed to map cursor updates: %d\n", ret); + return ret; + } + off = drm_gem_vram_offset(gbo); + if (off < 0) { + ret = (int)off; + dev_err(&dev->pdev->dev, + "failed to get cursor scanout address: %d\n", ret); + goto err_drm_gem_vram_vunmap; + } + + ret = mgag200_cursor_update(mdev, dst, src, width, height); + if (ret) + goto err_drm_gem_vram_vunmap; + mgag200_cursor_set_base(mdev, off); /* Adjust cursor control register to turn on the cursor */ WREG_DAC(MGA1064_CURSOR_CTL, 4); /* 16-colour palletized cursor mode */ - /* Now update internal buffer pointers */ - if (pixels_current) - drm_gem_vram_unpin(pixels_current); - mdev->cursor.pixels_current = pixels_next; + drm_gem_vram_vunmap(gbo, dst); - drm_gem_vram_kunmap(pixels_next); - drm_gem_vram_kunmap(gbo); - drm_gem_vram_unpin(gbo); - drm_gem_object_put_unlocked(obj); + ++mdev->cursor.next_index; + mdev->cursor.next_index %= ARRAY_SIZE(mdev->cursor.gbo); + + return 0; + +err_drm_gem_vram_vunmap: + drm_gem_vram_vunmap(gbo, dst); + return ret; +} + +/* + * Hide the cursor off screen. We can't disable the cursor hardware because + * it takes too long to re-activate and causes momentary corruption. 
+ */ +static void mgag200_hide_cursor(struct mga_device *mdev) +{ + WREG8(MGA_CURPOSXL, 0); + WREG8(MGA_CURPOSXH, 0); +} + +static void mgag200_move_cursor(struct mga_device *mdev, int x, int y) +{ + if (WARN_ON(x <= 0)) + return; + if (WARN_ON(y <= 0)) + return; + if (WARN_ON(x & ~0xffff)) + return; + if (WARN_ON(y & ~0xffff)) + return; + + WREG8(MGA_CURPOSXL, x & 0xff); + WREG8(MGA_CURPOSXH, (x>>8) & 0xff); + + WREG8(MGA_CURPOSYL, y & 0xff); + WREG8(MGA_CURPOSYH, (y>>8) & 0xff); +} + +int mgag200_cursor_init(struct mga_device *mdev) +{ + struct drm_device *dev = mdev->dev; + size_t ncursors = ARRAY_SIZE(mdev->cursor.gbo); + size_t size; + int ret; + size_t i; + struct drm_gem_vram_object *gbo; + + size = roundup(64 * 48, PAGE_SIZE); + if (size * ncursors > mdev->vram_fb_available) + return -ENOMEM; + + for (i = 0; i < ncursors; ++i) { + gbo = drm_gem_vram_create(dev, &dev->vram_mm->bdev, + size, 0, false); + if (IS_ERR(gbo)) { + ret = PTR_ERR(gbo); + goto err_drm_gem_vram_put; + } + ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM | + DRM_GEM_VRAM_PL_FLAG_TOPDOWN); + if (ret) { + drm_gem_vram_put(gbo); + goto err_drm_gem_vram_put; + } + + mdev->cursor.gbo[i] = gbo; + } + + /* + * At the high end of video memory, we reserve space for + * buffer objects. The cursor plane uses this memory to store + * a double-buffered image of the current cursor. Hence, it's + * not available for framebuffers. + */ + mdev->vram_fb_available -= ncursors * size; return 0; -err_drm_gem_vram_kunmap_dst: - drm_gem_vram_kunmap(pixels_next); -err_drm_gem_vram_unpin_dst: - drm_gem_vram_unpin(pixels_next); -err_drm_gem_vram_kunmap_src: - drm_gem_vram_kunmap(gbo); -err_drm_gem_vram_unpin_src: - drm_gem_vram_unpin(gbo); +err_drm_gem_vram_put: + while (i) { + --i; + gbo = mdev->cursor.gbo[i]; + drm_gem_vram_unpin(gbo); + drm_gem_vram_put(gbo); + mdev->cursor.gbo[i] = NULL; + } + return ret; +} + +void mgag200_cursor_fini(struct mga_device *mdev) +{ + size_t i; + struct drm_gem_vram_object *gbo; + + for (i = 0; i < ARRAY_SIZE(mdev->cursor.gbo); ++i) { + gbo = mdev->cursor.gbo[i]; + drm_gem_vram_unpin(gbo); + drm_gem_vram_put(gbo); + } +} + +int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height) +{ + struct drm_device *dev = crtc->dev; + struct mga_device *mdev = (struct mga_device *)dev->dev_private; + struct drm_gem_object *obj; + struct drm_gem_vram_object *gbo = NULL; + int ret; + u8 *src; + + if (!handle || !file_priv) { + mgag200_hide_cursor(mdev); + return 0; + } + + if (width != 64 || height != 64) { + WREG8(MGA_CURPOSXL, 0); + WREG8(MGA_CURPOSXH, 0); + return -EINVAL; + } + + obj = drm_gem_object_lookup(file_priv, handle); + if (!obj) + return -ENOENT; + gbo = drm_gem_vram_of_gem(obj); + src = drm_gem_vram_vmap(gbo); + if (IS_ERR(src)) { + ret = PTR_ERR(src); + dev_err(&dev->pdev->dev, + "failed to map user buffer updates\n"); + goto err_drm_gem_object_put_unlocked; + } + + ret = mgag200_show_cursor(mdev, src, width, height); + if (ret) + goto err_drm_gem_vram_vunmap; + + /* Now update internal buffer pointers */ + drm_gem_vram_vunmap(gbo, src); + drm_gem_object_put_unlocked(obj); + + return 0; +err_drm_gem_vram_vunmap: + drm_gem_vram_vunmap(gbo, src); err_drm_gem_object_put_unlocked: drm_gem_object_put_unlocked(obj); return ret; } -int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) +int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) { struct mga_device *mdev = (struct mga_device *)crtc->dev->dev_private; 
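The reworked mgag200 cursor path above keeps two pinned VRAM buffer objects and rotates cursor.next_index between them after each upload, subtracting their pages from vram_fb_available so framebuffers cannot claim that memory. A rough standalone sketch of that bookkeeping follows, assuming a 4 KiB page and the 64*48-byte image size used above; the struct and helper names are illustrative, not the driver's.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define ROUNDUP(x, a) ((((x) + (a) - 1) / (a)) * (a))

struct cursor_buffers {
	size_t bo_size;		 /* size of each cursor buffer object */
	unsigned int next_index; /* which buffer receives the next image */
};

static int cursor_init(struct cursor_buffers *c, size_t *vram_fb_available)
{
	c->bo_size = ROUNDUP(64 * 48, PAGE_SIZE);
	c->next_index = 0;
	if (2 * c->bo_size > *vram_fb_available)
		return -1;			/* not enough VRAM, no HW cursor */
	*vram_fb_available -= 2 * c->bo_size;	/* reserved for cursor images */
	return 0;
}

/* after a successful image upload, the other buffer becomes the next target */
static void cursor_flip(struct cursor_buffers *c)
{
	c->next_index = (c->next_index + 1) % 2;
}

int main(void)
{
	size_t vram = 16UL * 1024 * 1024;	/* pretend 16 MiB of VRAM */
	struct cursor_buffers c;

	if (cursor_init(&c, &vram) == 0) {
		cursor_flip(&c);
		printf("bo size %zu, fb vram left %zu, next index %u\n",
		       c.bo_size, vram, c.next_index);
	}
	return 0;
}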
+ /* Our origin is at (64,64) */ x += 64; y += 64; - BUG_ON(x <= 0); - BUG_ON(y <= 0); - BUG_ON(x & ~0xffff); - BUG_ON(y & ~0xffff); + mgag200_move_cursor(mdev, x, y); - WREG8(MGA_CURPOSXL, x & 0xff); - WREG8(MGA_CURPOSXH, (x>>8) & 0xff); - - WREG8(MGA_CURPOSYL, y & 0xff); - WREG8(MGA_CURPOSYH, (y>>8) & 0xff); return 0; } diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index afd9119b6cf1..397f8b0a9af8 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -46,7 +46,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist); static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { - drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "mgag200drmfb"); + drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb"); return drm_get_pci_dev(pdev, ent, &driver); } @@ -58,10 +58,7 @@ static void mga_pci_remove(struct pci_dev *pdev) drm_put_dev(dev); } -static const struct file_operations mgag200_driver_fops = { - .owner = THIS_MODULE, - DRM_VRAM_MM_FILE_OPERATIONS -}; +DEFINE_DRM_GEM_FOPS(mgag200_driver_fops); static struct drm_driver driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET, diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h index 1c93f8dc08c7..0ea9a525e57d 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.h +++ b/drivers/gpu/drm/mgag200/mgag200_drv.h @@ -19,7 +19,6 @@ #include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> #include <drm/drm_gem_vram_helper.h> -#include <drm/drm_vram_mm_helper.h> #include "mgag200_reg.h" @@ -130,16 +129,8 @@ struct mga_connector { }; struct mga_cursor { - /* - We have to have 2 buffers for the cursor to avoid occasional - corruption while switching cursor icons. - If either of these is NULL, then don't do hardware cursors, and - fall back to software. 
- */ - struct drm_gem_vram_object *pixels_1; - struct drm_gem_vram_object *pixels_2; - /* The currently displayed icon, this points to one of pixels_1, or pixels_2 */ - struct drm_gem_vram_object *pixels_current; + struct drm_gem_vram_object *gbo[2]; + unsigned int next_index; }; struct mga_mc { @@ -174,6 +165,8 @@ struct mga_device { struct mga_cursor cursor; + size_t vram_fb_available; + bool suspended; int num_crtc; enum mga_type type; @@ -204,8 +197,10 @@ int mgag200_mm_init(struct mga_device *mdev); void mgag200_mm_fini(struct mga_device *mdev); int mgag200_mmap(struct file *filp, struct vm_area_struct *vma); -int mga_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, - uint32_t handle, uint32_t width, uint32_t height); -int mga_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); +int mgag200_cursor_init(struct mga_device *mdev); +void mgag200_cursor_fini(struct mga_device *mdev); +int mgag200_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, + uint32_t handle, uint32_t width, uint32_t height); +int mgag200_crtc_cursor_move(struct drm_crtc *crtc, int x, int y); #endif /* __MGAG200_DRV_H__ */ diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c index a9773334dedf..5f74aabcd3df 100644 --- a/drivers/gpu/drm/mgag200/mgag200_main.c +++ b/drivers/gpu/drm/mgag200/mgag200_main.c @@ -159,7 +159,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) drm_mode_config_init(dev); dev->mode_config.funcs = (void *)&mga_mode_funcs; - if (IS_G200_SE(mdev) && mdev->mc.vram_size < (2048*1024)) + if (IS_G200_SE(mdev) && mdev->vram_fb_available < (2048*1024)) dev->mode_config.preferred_depth = 16; else dev->mode_config.preferred_depth = 32; @@ -171,20 +171,10 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) goto err_modeset; } - /* Make small buffers to store a hardware cursor (double buffered icon updates) */ - mdev->cursor.pixels_1 = drm_gem_vram_create(dev, &dev->vram_mm->bdev, - roundup(48*64, PAGE_SIZE), - 0, 0); - mdev->cursor.pixels_2 = drm_gem_vram_create(dev, &dev->vram_mm->bdev, - roundup(48*64, PAGE_SIZE), - 0, 0); - if (IS_ERR(mdev->cursor.pixels_2) || IS_ERR(mdev->cursor.pixels_1)) { - mdev->cursor.pixels_1 = NULL; - mdev->cursor.pixels_2 = NULL; + r = mgag200_cursor_init(mdev); + if (r) dev_warn(&dev->pdev->dev, - "Could not allocate space for cursors. Not doing hardware cursors.\n"); - } - mdev->cursor.pixels_current = NULL; + "Could not initialize cursors. 
Not doing hardware cursors.\n"); r = drm_fbdev_generic_setup(mdev->dev, 0); if (r) @@ -194,6 +184,7 @@ int mgag200_driver_load(struct drm_device *dev, unsigned long flags) err_modeset: drm_mode_config_cleanup(dev); + mgag200_cursor_fini(mdev); mgag200_mm_fini(mdev); err_mm: dev->dev_private = NULL; @@ -209,6 +200,7 @@ void mgag200_driver_unload(struct drm_device *dev) return; mgag200_modeset_fini(mdev); drm_mode_config_cleanup(dev); + mgag200_cursor_fini(mdev); mgag200_mm_fini(mdev); dev->dev_private = NULL; } diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c index 5e778b5f1a10..5ec697148fc1 100644 --- a/drivers/gpu/drm/mgag200/mgag200_mode.c +++ b/drivers/gpu/drm/mgag200/mgag200_mode.c @@ -1413,8 +1413,8 @@ static void mga_crtc_disable(struct drm_crtc *crtc) /* These provide the minimum set of functions required to handle a CRTC */ static const struct drm_crtc_funcs mga_crtc_funcs = { - .cursor_set = mga_crtc_cursor_set, - .cursor_move = mga_crtc_cursor_move, + .cursor_set = mgag200_crtc_cursor_set, + .cursor_move = mgag200_crtc_cursor_move, .gamma_set = mga_crtc_gamma_set, .set_config = drm_crtc_helper_set_config, .destroy = mga_crtc_destroy, @@ -1629,7 +1629,7 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector, bpp = connector->cmdline_mode.bpp; } - if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->mc.vram_size) { + if ((mode->hdisplay * mode->vdisplay * (bpp/8)) > mdev->vram_fb_available) { if (connector->cmdline_mode.specified) connector->cmdline_mode.specified = false; return MODE_BAD; @@ -1638,16 +1638,6 @@ static enum drm_mode_status mga_vga_mode_valid(struct drm_connector *connector, return MODE_OK; } -static struct drm_encoder *mga_connector_best_encoder(struct drm_connector - *connector) -{ - int enc_id = connector->encoder_ids[0]; - /* pick the encoder ids */ - if (enc_id) - return drm_encoder_find(connector->dev, NULL, enc_id); - return NULL; -} - static void mga_connector_destroy(struct drm_connector *connector) { struct mga_connector *mga_connector = to_mga_connector(connector); @@ -1659,7 +1649,6 @@ static void mga_connector_destroy(struct drm_connector *connector) static const struct drm_connector_helper_funcs mga_vga_connector_helper_funcs = { .get_modes = mga_vga_get_modes, .mode_valid = mga_vga_mode_valid, - .best_encoder = mga_connector_best_encoder, }; static const struct drm_connector_funcs mga_vga_connector_funcs = { diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 73a6b848601c..99997d737362 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -37,8 +37,7 @@ int mgag200_mm_init(struct mga_device *mdev) struct drm_device *dev = mdev->dev; vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0), - mdev->mc.vram_size, - &drm_gem_vram_mm_funcs); + mdev->mc.vram_size); if (IS_ERR(vmm)) { ret = PTR_ERR(vmm); DRM_ERROR("Error initializing VRAM MM; %d\n", ret); @@ -51,6 +50,8 @@ int mgag200_mm_init(struct mga_device *mdev) mdev->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), pci_resource_len(dev->pdev, 0)); + mdev->vram_fb_available = mdev->mc.vram_size; + return 0; } @@ -58,6 +59,8 @@ void mgag200_mm_fini(struct mga_device *mdev) { struct drm_device *dev = mdev->dev; + mdev->vram_fb_available = 0; + drm_vram_helper_release_mm(dev); arch_io_free_memtype_wc(pci_resource_start(dev->pdev, 0), diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c 
b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index e686331fa089..691c1a277d91 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -352,26 +352,26 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu, cxdbg = ioremap(res->start, resource_size(res)); if (cxdbg) { - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLT, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT, A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(0xf)); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_CNTLM, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM, A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(0xf)); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0, 0x76543210); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1, + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1, 0xFEDCBA98); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2, 0); - cxdbg_write(cxdbg, REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2, 0); + cxdbg_write(cxdbg, REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3, 0); } a6xx_state->debugbus = state_kcalloc(a6xx_state, diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h index 4c889aabdaf9..959d03e007fa 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h @@ -31,7 +31,7 @@ */ #define DPU_DEBUG(fmt, ...) \ do { \ - if (unlikely(drm_debug & DRM_UT_KMS)) \ + if (drm_debug_enabled(DRM_UT_KMS)) \ DRM_DEBUG(fmt, ##__VA_ARGS__); \ else \ pr_debug(fmt, ##__VA_ARGS__); \ @@ -43,7 +43,7 @@ */ #define DPU_DEBUG_DRIVER(fmt, ...) 
\ do { \ - if (unlikely(drm_debug & DRM_UT_DRIVER)) \ + if (drm_debug_enabled(DRM_UT_DRIVER)) \ DRM_ERROR(fmt, ##__VA_ARGS__); \ else \ pr_debug(fmt, ##__VA_ARGS__); \ diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h index 0da8a4e428ad..eff1a4c61258 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.h +++ b/drivers/gpu/drm/msm/dsi/dsi.h @@ -9,6 +9,7 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_panel.h> diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c index 0f312ac5b624..ad4e963ccd9b 100644 --- a/drivers/gpu/drm/msm/edp/edp.c +++ b/drivers/gpu/drm/msm/edp/edp.c @@ -178,7 +178,9 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev, goto fail; } - encoder->bridge = edp->bridge; + ret = drm_bridge_attach(encoder, edp->bridge, NULL); + if (ret) + goto fail; priv->bridges[priv->num_bridges++] = edp->bridge; priv->connectors[priv->num_connectors++] = edp->connector; diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h index f2c17858a703..eb34243dad53 100644 --- a/drivers/gpu/drm/msm/edp/edp.h +++ b/drivers/gpu/drm/msm/edp/edp.h @@ -10,6 +10,7 @@ #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/platform_device.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_dp_helper.h> diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c index 7f3dd3ffe2c9..0d9657cc70db 100644 --- a/drivers/gpu/drm/msm/edp/edp_ctrl.c +++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c @@ -89,7 +89,6 @@ struct edp_ctrl { /* edid raw data */ struct edid *edid; - struct drm_dp_link dp_link; struct drm_dp_aux *drm_aux; /* dpcd raw data */ @@ -403,7 +402,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl) u32 prate; u32 lrate; u32 bpp; - u8 max_lane = ctrl->dp_link.num_lanes; + u8 max_lane = drm_dp_max_lane_count(ctrl->dpcd); u8 lane; prate = ctrl->pixel_rate; @@ -413,7 +412,7 @@ static void edp_fill_link_cfg(struct edp_ctrl *ctrl) * By default, use the maximum link rate and minimum lane count, * so that we can do rate down shift during link training. */ - ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate); + ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE]; prate *= bpp; prate /= 8; /* in kByte */ @@ -439,7 +438,7 @@ static void edp_config_ctrl(struct edp_ctrl *ctrl) data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1); - if (ctrl->dp_link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + if (drm_dp_enhanced_frame_cap(ctrl->dpcd)) data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING; depth = EDP_6BIT; @@ -701,7 +700,7 @@ static int edp_link_rate_down_shift(struct edp_ctrl *ctrl) rate = ctrl->link_rate; lane = ctrl->lane_cnt; - max_lane = ctrl->dp_link.num_lanes; + max_lane = drm_dp_max_lane_count(ctrl->dpcd); bpp = ctrl->color_depth * 3; prate = ctrl->pixel_rate; @@ -751,18 +750,22 @@ static int edp_clear_training_pattern(struct edp_ctrl *ctrl) static int edp_do_link_train(struct edp_ctrl *ctrl) { + u8 values[2]; int ret; - struct drm_dp_link dp_link; DBG(""); /* * Set the current link rate and lane cnt to panel. 
They may have been * adjusted and the values are different from them in DPCD CAP */ - dp_link.num_lanes = ctrl->lane_cnt; - dp_link.rate = drm_dp_bw_code_to_link_rate(ctrl->link_rate); - dp_link.capabilities = ctrl->dp_link.capabilities; - if (drm_dp_link_configure(ctrl->drm_aux, &dp_link) < 0) + values[0] = ctrl->lane_cnt; + values[1] = ctrl->link_rate; + + if (drm_dp_enhanced_frame_cap(ctrl->dpcd)) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + if (drm_dp_dpcd_write(ctrl->drm_aux, DP_LINK_BW_SET, values, + sizeof(values)) < 0) return EDP_TRAIN_FAIL; ctrl->v_level = 0; /* start from default level */ @@ -952,6 +955,7 @@ static void edp_ctrl_on_worker(struct work_struct *work) { struct edp_ctrl *ctrl = container_of( work, struct edp_ctrl, on_work); + u8 value; int ret; mutex_lock(&ctrl->dev_mutex); @@ -965,9 +969,27 @@ static void edp_ctrl_on_worker(struct work_struct *work) edp_ctrl_link_enable(ctrl, 1); edp_ctrl_irq_enable(ctrl, 1); - ret = drm_dp_link_power_up(ctrl->drm_aux, &ctrl->dp_link); - if (ret) - goto fail; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) { + ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value); + if (ret < 0) + goto fail; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value); + if (ret < 0) + goto fail; + + /* + * According to the DP 1.1 specification, a "Sink Device must + * exit the power saving state within 1 ms" (Section 2.5.3.1, + * Table 5-52, "Sink Control Field" (register 0x600). + */ + usleep_range(1000, 2000); + } ctrl->power_on = true; @@ -1011,7 +1033,19 @@ static void edp_ctrl_off_worker(struct work_struct *work) edp_state_ctrl(ctrl, 0); - drm_dp_link_power_down(ctrl->drm_aux, &ctrl->dp_link); + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) { + u8 value; + int ret; + + ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value); + if (ret > 0) { + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value); + } + } edp_ctrl_irq_enable(ctrl, 0); @@ -1225,14 +1259,8 @@ int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl, edp_ctrl_irq_enable(ctrl, 1); } - ret = drm_dp_link_probe(ctrl->drm_aux, &ctrl->dp_link); - if (ret) { - pr_err("%s: read dpcd cap failed, %d\n", __func__, ret); - goto disable_ret; - } - /* Initialize link rate as panel max link rate */ - ctrl->link_rate = drm_dp_link_rate_to_bw_code(ctrl->dp_link.rate); + ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE]; ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc); if (!ctrl->edid) { diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 355afb936401..1a9b6289637d 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -327,7 +327,9 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } - encoder->bridge = hdmi->bridge; + ret = drm_bridge_attach(encoder, hdmi->bridge, NULL); + if (ret) + goto fail; priv->bridges[priv->num_bridges++] = hdmi->bridge; priv->connectors[priv->num_connectors++] = hdmi->connector; diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h index bdac452b00fb..d0b84f0abee1 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.h +++ b/drivers/gpu/drm/msm/hdmi/hdmi.h @@ -14,6 +14,8 @@ #include <linux/gpio/consumer.h> #include <linux/hdmi.h> +#include <drm/drm_bridge.h> + #include "msm_drv.h" #include "hdmi.xml.h" diff --git 
a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c index 6be879578140..1c74381a4fc9 100644 --- a/drivers/gpu/drm/msm/msm_debugfs.c +++ b/drivers/gpu/drm/msm/msm_debugfs.c @@ -47,12 +47,8 @@ static int msm_gpu_release(struct inode *inode, struct file *file) struct msm_gpu_show_priv *show_priv = m->private; struct msm_drm_private *priv = show_priv->dev->dev_private; struct msm_gpu *gpu = priv->gpu; - int ret; - - ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex); - if (ret) - return ret; + mutex_lock(&show_priv->dev->struct_mutex); gpu->funcs->gpu_state_put(show_priv->state); mutex_unlock(&show_priv->dev->struct_mutex); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c index 12421567af89..b69ace8bf526 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_crtc.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_crtc.c @@ -95,8 +95,11 @@ static void mxsfb_set_bus_fmt(struct mxsfb_drm_private *mxsfb) reg = readl(mxsfb->base + LCDC_CTRL); - if (mxsfb->connector.display_info.num_bus_formats) - bus_format = mxsfb->connector.display_info.bus_formats[0]; + if (mxsfb->connector->display_info.num_bus_formats) + bus_format = mxsfb->connector->display_info.bus_formats[0]; + + DRM_DEV_DEBUG_DRIVER(drm->dev, "Using bus_format: 0x%08X\n", + bus_format); reg &= ~CTRL_BUS_WIDTH_MASK; switch (bus_format) { @@ -204,8 +207,9 @@ static dma_addr_t mxsfb_get_fb_paddr(struct mxsfb_drm_private *mxsfb) static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) { + struct drm_device *drm = mxsfb->pipe.crtc.dev; struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode; - const u32 bus_flags = mxsfb->connector.display_info.bus_flags; + u32 bus_flags = mxsfb->connector->display_info.bus_flags; u32 vdctrl0, vsync_pulse_len, hsync_pulse_len; int err; @@ -229,6 +233,16 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) clk_set_rate(mxsfb->clk, m->crtc_clock * 1000); + if (mxsfb->bridge && mxsfb->bridge->timings) + bus_flags = mxsfb->bridge->timings->input_bus_flags; + + DRM_DEV_DEBUG_DRIVER(drm->dev, "Pixel clock: %dkHz (actual: %dkHz)\n", + m->crtc_clock, + (int)(clk_get_rate(mxsfb->clk) / 1000)); + DRM_DEV_DEBUG_DRIVER(drm->dev, "Connector bus_flags: 0x%08X\n", + bus_flags); + DRM_DEV_DEBUG_DRIVER(drm->dev, "Mode flags: 0x%08X\n", m->flags); + writel(TRANSFER_COUNT_SET_VCOUNT(m->crtc_vdisplay) | TRANSFER_COUNT_SET_HCOUNT(m->crtc_hdisplay), mxsfb->base + mxsfb->devdata->transfer_count); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index e8506335cd15..497cf443a9af 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -101,9 +101,25 @@ static void mxsfb_pipe_enable(struct drm_simple_display_pipe *pipe, struct drm_crtc_state *crtc_state, struct drm_plane_state *plane_state) { + struct drm_connector *connector; struct mxsfb_drm_private *mxsfb = drm_pipe_to_mxsfb_drm_private(pipe); struct drm_device *drm = pipe->plane.dev; + if (!mxsfb->connector) { + list_for_each_entry(connector, + &drm->mode_config.connector_list, + head) + if (connector->encoder == &mxsfb->pipe.encoder) { + mxsfb->connector = connector; + break; + } + } + + if (!mxsfb->connector) { + dev_warn(drm->dev, "No connector attached, using default\n"); + mxsfb->connector = &mxsfb->panel_connector; + } + pm_runtime_get_sync(drm->dev); drm_panel_prepare(mxsfb->panel); mxsfb_crtc_enable(mxsfb); @@ -129,6 +145,9 @@ static void mxsfb_pipe_disable(struct drm_simple_display_pipe *pipe) drm_crtc_send_vblank_event(crtc, 
event); } spin_unlock_irq(&drm->event_lock); + + if (mxsfb->connector != &mxsfb->panel_connector) + mxsfb->connector = NULL; } static void mxsfb_pipe_update(struct drm_simple_display_pipe *pipe, @@ -226,16 +245,33 @@ static int mxsfb_load(struct drm_device *drm, unsigned long flags) ret = drm_simple_display_pipe_init(drm, &mxsfb->pipe, &mxsfb_funcs, mxsfb_formats, ARRAY_SIZE(mxsfb_formats), NULL, - &mxsfb->connector); + mxsfb->connector); if (ret < 0) { dev_err(drm->dev, "Cannot setup simple display pipe\n"); goto err_vblank; } - ret = drm_panel_attach(mxsfb->panel, &mxsfb->connector); - if (ret) { - dev_err(drm->dev, "Cannot connect panel\n"); - goto err_vblank; + /* + * Attach panel only if there is one. + * If there is no panel attach, it must be a bridge. In this case, we + * need a reference to its connector for a proper initialization. + * We will do this check in pipe->enable(), since the connector won't + * be attached to an encoder until then. + */ + + if (mxsfb->panel) { + ret = drm_panel_attach(mxsfb->panel, mxsfb->connector); + if (ret) { + dev_err(drm->dev, "Cannot connect panel: %d\n", ret); + goto err_vblank; + } + } else if (mxsfb->bridge) { + ret = drm_simple_display_pipe_attach_bridge(&mxsfb->pipe, + mxsfb->bridge); + if (ret) { + dev_err(drm->dev, "Cannot connect bridge: %d\n", ret); + goto err_vblank; + } } drm->mode_config.min_width = MXSFB_MIN_XRES; diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.h b/drivers/gpu/drm/mxsfb/mxsfb_drv.h index d975300dca05..0b65b5194a9c 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.h +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.h @@ -27,8 +27,10 @@ struct mxsfb_drm_private { struct clk *clk_disp_axi; struct drm_simple_display_pipe pipe; - struct drm_connector connector; + struct drm_connector panel_connector; + struct drm_connector *connector; struct drm_panel *panel; + struct drm_bridge *bridge; }; int mxsfb_setup_crtc(struct drm_device *dev); diff --git a/drivers/gpu/drm/mxsfb/mxsfb_out.c b/drivers/gpu/drm/mxsfb/mxsfb_out.c index be36f4d6cc96..4eb94744c526 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_out.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_out.c @@ -21,7 +21,8 @@ static struct mxsfb_drm_private * drm_connector_to_mxsfb_drm_private(struct drm_connector *connector) { - return container_of(connector, struct mxsfb_drm_private, connector); + return container_of(connector, struct mxsfb_drm_private, + panel_connector); } static int mxsfb_panel_get_modes(struct drm_connector *connector) @@ -76,22 +77,23 @@ static const struct drm_connector_funcs mxsfb_panel_connector_funcs = { int mxsfb_create_output(struct drm_device *drm) { struct mxsfb_drm_private *mxsfb = drm->dev_private; - struct drm_panel *panel; int ret; - ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, &panel, NULL); + ret = drm_of_find_panel_or_bridge(drm->dev->of_node, 0, 0, + &mxsfb->panel, &mxsfb->bridge); if (ret) return ret; - mxsfb->connector.dpms = DRM_MODE_DPMS_OFF; - mxsfb->connector.polled = 0; - drm_connector_helper_add(&mxsfb->connector, - &mxsfb_panel_connector_helper_funcs); - ret = drm_connector_init(drm, &mxsfb->connector, - &mxsfb_panel_connector_funcs, - DRM_MODE_CONNECTOR_Unknown); - if (!ret) - mxsfb->panel = panel; + if (mxsfb->panel) { + mxsfb->connector = &mxsfb->panel_connector; + mxsfb->connector->dpms = DRM_MODE_DPMS_OFF; + mxsfb->connector->polled = 0; + drm_connector_helper_add(mxsfb->connector, + &mxsfb_panel_connector_helper_funcs); + ret = drm_connector_init(drm, mxsfb->connector, + &mxsfb_panel_connector_funcs, + DRM_MODE_CONNECTOR_Unknown); + } return ret; 
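The mxsfb changes above only fall back to the driver's own panel_connector when no bridge-provided connector is attached to the pipe's encoder at enable time. A simplified sketch of that selection is shown below, using stand-in types and names rather than the real DRM structures.

#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the DRM objects involved (not the kernel types). */
struct encoder { int id; };
struct connector {
	const char *name;
	struct encoder *encoder;	/* encoder this connector is attached to */
	struct connector *next;
};

/*
 * Mirrors the lookup in mxsfb_pipe_enable(): prefer a connector that a bridge
 * attached to our encoder, otherwise fall back to the built-in panel connector.
 */
static struct connector *pick_connector(struct connector *list,
					struct encoder *our_encoder,
					struct connector *panel_connector)
{
	struct connector *c;

	for (c = list; c; c = c->next)
		if (c->encoder == our_encoder)
			return c;
	return panel_connector;
}

int main(void)
{
	struct encoder enc = { .id = 0 };
	struct connector panel = { .name = "panel", .encoder = NULL, .next = NULL };
	struct connector bridge = { .name = "bridge", .encoder = &enc, .next = &panel };

	printf("using %s connector\n",
	       pick_connector(&bridge, &enc, &panel)->name);
	return 0;
}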
} diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index dc64863b5fd8..44ee82d0c9b6 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -256,7 +256,7 @@ nv04_display_create(struct drm_device *dev) list_for_each_entry_safe(connector, ct, &dev->mode_config.connector_list, head) { - if (!connector->encoder_ids[0]) { + if (!connector->possible_encoders) { NV_WARN(drm, "%s has no encoders, removing\n", connector->name); connector->funcs->destroy(connector); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index b46be8a091e9..549486f1d937 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -986,20 +986,11 @@ nv50_mstc_atomic_check(struct drm_connector *connector, return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port); } -static const struct drm_connector_helper_funcs -nv50_mstc_help = { - .get_modes = nv50_mstc_get_modes, - .mode_valid = nv50_mstc_mode_valid, - .best_encoder = nv50_mstc_best_encoder, - .atomic_best_encoder = nv50_mstc_atomic_best_encoder, - .atomic_check = nv50_mstc_atomic_check, -}; - -static enum drm_connector_status -nv50_mstc_detect(struct drm_connector *connector, bool force) +static int +nv50_mstc_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, bool force) { struct nv50_mstc *mstc = nv50_mstc(connector); - enum drm_connector_status conn_status; int ret; if (drm_connector_is_unregistered(connector)) @@ -1009,14 +1000,24 @@ nv50_mstc_detect(struct drm_connector *connector, bool force) if (ret < 0 && ret != -EACCES) return connector_status_disconnected; - conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr, - mstc->port); + ret = drm_dp_mst_detect_port(connector, ctx, mstc->port->mgr, + mstc->port); pm_runtime_mark_last_busy(connector->dev->dev); pm_runtime_put_autosuspend(connector->dev->dev); - return conn_status; + return ret; } +static const struct drm_connector_helper_funcs +nv50_mstc_help = { + .get_modes = nv50_mstc_get_modes, + .mode_valid = nv50_mstc_mode_valid, + .best_encoder = nv50_mstc_best_encoder, + .atomic_best_encoder = nv50_mstc_atomic_best_encoder, + .atomic_check = nv50_mstc_atomic_check, + .detect_ctx = nv50_mstc_detect, +}; + static void nv50_mstc_destroy(struct drm_connector *connector) { @@ -1031,7 +1032,6 @@ nv50_mstc_destroy(struct drm_connector *connector) static const struct drm_connector_funcs nv50_mstc = { .reset = nouveau_conn_reset, - .detect = nv50_mstc_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = nv50_mstc_destroy, .atomic_duplicate_state = nouveau_conn_atomic_duplicate_state, @@ -1309,14 +1309,14 @@ nv50_mstm_fini(struct nv50_mstm *mstm) } static void -nv50_mstm_init(struct nv50_mstm *mstm) +nv50_mstm_init(struct nv50_mstm *mstm, bool runtime) { int ret; if (!mstm || !mstm->mgr.mst_state) return; - ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr); + ret = drm_dp_mst_topology_mgr_resume(&mstm->mgr, !runtime); if (ret == -1) { drm_dp_mst_topology_mgr_set_mst(&mstm->mgr, false); drm_kms_helper_hotplug_event(mstm->mgr.dev); @@ -2263,7 +2263,7 @@ nv50_display_init(struct drm_device *dev, bool resume, bool runtime) if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) { struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); - nv50_mstm_init(nv_encoder->dp.mstm); + nv50_mstm_init(nv_encoder->dp.mstm, runtime); } } @@ -2392,7 +2392,7 @@ nv50_display_create(struct drm_device 
*dev) /* cull any connectors we created that don't have an encoder */ list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) { - if (connector->encoder_ids[0]) + if (connector->possible_encoders) continue; NV_WARN(drm, "%s has no encoders, removing\n", diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 94dfa2e5a9ab..5b413588b823 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -365,9 +365,8 @@ find_encoder(struct drm_connector *connector, int type) { struct nouveau_encoder *nv_encoder; struct drm_encoder *enc; - int i; - drm_connector_for_each_possible_encoder(connector, enc, i) { + drm_connector_for_each_possible_encoder(connector, enc) { nv_encoder = nouveau_encoder(enc); if (type == DCB_OUTPUT_ANY || @@ -414,10 +413,10 @@ nouveau_connector_ddc_detect(struct drm_connector *connector) struct drm_device *dev = connector->dev; struct nouveau_encoder *nv_encoder = NULL, *found = NULL; struct drm_encoder *encoder; - int i, ret; + int ret; bool switcheroo_ddc = false; - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { nv_encoder = nouveau_encoder(encoder); switch (nv_encoder->dcb->type) { @@ -1131,6 +1130,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify) const char *name = connector->name; struct nouveau_encoder *nv_encoder; int ret; + bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); + + if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { + NV_DEBUG(drm, "service %s\n", name); + drm_dp_cec_irq(&nv_connector->aux); + if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) + nv50_mstm_service(nv_encoder->dp.mstm); + + return NVIF_NOTIFY_KEEP; + } ret = pm_runtime_get(drm->dev->dev); if (ret == 0) { @@ -1151,25 +1160,16 @@ nouveau_connector_hotplug(struct nvif_notify *notify) return NVIF_NOTIFY_DROP; } - if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) { - NV_DEBUG(drm, "service %s\n", name); - drm_dp_cec_irq(&nv_connector->aux); - if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) - nv50_mstm_service(nv_encoder->dp.mstm); - } else { - bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG); - + if (!plugged) + drm_dp_cec_unset_edid(&nv_connector->aux); + NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name); + if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) { if (!plugged) - drm_dp_cec_unset_edid(&nv_connector->aux); - NV_DEBUG(drm, "%splugged %s\n", plugged ? 
"" : "un", name); - if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) { - if (!plugged) - nv50_mstm_remove(nv_encoder->dp.mstm); - } - - drm_helper_hpd_irq_event(connector->dev); + nv50_mstm_remove(nv_encoder->dp.mstm); } + drm_helper_hpd_irq_event(connector->dev); + pm_runtime_mark_last_busy(drm->dev->dev); pm_runtime_put_autosuspend(drm->dev->dev); return NVIF_NOTIFY_KEEP; @@ -1415,8 +1415,7 @@ nouveau_connector_create(struct drm_device *dev, switch (type) { case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_eDP: - drm_dp_cec_register_connector(&nv_connector->aux, - connector->name, dev->dev); + drm_dp_cec_register_connector(&nv_connector->aux, connector); break; } diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 6f038511a03a..53f9bceaf17a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -407,6 +407,17 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime) struct drm_connector_list_iter conn_iter; int ret; + /* + * Enable hotplug interrupts (done as early as possible, since we need + * them for MST) + */ + drm_connector_list_iter_begin(dev, &conn_iter); + nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { + struct nouveau_connector *conn = nouveau_connector(connector); + nvif_notify_get(&conn->hpd); + } + drm_connector_list_iter_end(&conn_iter); + ret = disp->init(dev, resume, runtime); if (ret) return ret; @@ -416,14 +427,6 @@ nouveau_display_init(struct drm_device *dev, bool resume, bool runtime) */ drm_kms_helper_poll_enable(dev); - /* enable hotplug interrupts */ - drm_connector_list_iter_begin(dev, &conn_iter); - nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) { - struct nouveau_connector *conn = nouveau_connector(connector); - nvif_notify_get(&conn->hpd); - } - drm_connector_list_iter_end(&conn_iter); - return ret; } diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index f0daf958e03a..77a0c6ad3cef 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -236,6 +236,7 @@ nouveau_ttm_init(struct nouveau_drm *drm) ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver, dev->anon_inode->i_mapping, + dev->vma_offset_manager, drm->client.mmu.dmabits <= 32 ? true : false); if (ret) { NV_ERROR(drm, "error initialising bo driver, %d\n", ret); diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile index 904101c5e79d..5950c3f52c2e 100644 --- a/drivers/gpu/drm/omapdrm/dss/Makefile +++ b/drivers/gpu/drm/omapdrm/dss/Makefile @@ -6,7 +6,7 @@ omapdss-base-y := base.o display.o dss-of.o output.o obj-$(CONFIG_OMAP2_DSS) += omapdss.o # Core DSS files -omapdss-y := core.o dss.o dispc.o dispc_coefs.o \ +omapdss-y := dss.o dispc.o dispc_coefs.o \ pll.o video-pll.o omapdss-$(CONFIG_OMAP2_DSS_DPI) += dpi.o omapdss-$(CONFIG_OMAP2_DSS_VENC) += venc.o diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c deleted file mode 100644 index 6ac497b63711..000000000000 --- a/drivers/gpu/drm/omapdrm/dss/core.c +++ /dev/null @@ -1,55 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (C) 2009 Nokia Corporation - * Author: Tomi Valkeinen <tomi.valkeinen@ti.com> - * - * Some code and ideas taken from drivers/video/omap/ driver - * by Imre Deak. 
- */ - -#define DSS_SUBSYS_NAME "CORE" - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> - -#include "omapdss.h" -#include "dss.h" - -/* INIT */ -static struct platform_driver * const omap_dss_drivers[] = { - &omap_dsshw_driver, - &omap_dispchw_driver, -#ifdef CONFIG_OMAP2_DSS_DSI - &omap_dsihw_driver, -#endif -#ifdef CONFIG_OMAP2_DSS_VENC - &omap_venchw_driver, -#endif -#ifdef CONFIG_OMAP4_DSS_HDMI - &omapdss_hdmi4hw_driver, -#endif -#ifdef CONFIG_OMAP5_DSS_HDMI - &omapdss_hdmi5hw_driver, -#endif -}; - -static int __init omap_dss_init(void) -{ - return platform_register_drivers(omap_dss_drivers, - ARRAY_SIZE(omap_dss_drivers)); -} - -static void __exit omap_dss_exit(void) -{ - platform_unregister_drivers(omap_dss_drivers, - ARRAY_SIZE(omap_dss_drivers)); -} - -module_init(omap_dss_init); -module_exit(omap_dss_exit); - -MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); -MODULE_DESCRIPTION("OMAP2/3 Display Subsystem"); -MODULE_LICENSE("GPL v2"); - diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c index ed0ccbeed70f..413dbdd1771e 100644 --- a/drivers/gpu/drm/omapdrm/dss/dispc.c +++ b/drivers/gpu/drm/omapdrm/dss/dispc.c @@ -114,6 +114,7 @@ struct dispc_features { const unsigned int num_reg_fields; const enum omap_overlay_caps *overlay_caps; const u32 **supported_color_modes; + const u32 *supported_scaler_color_modes; unsigned int num_mgrs; unsigned int num_ovls; unsigned int buffer_size_unit; @@ -184,9 +185,6 @@ struct dispc_device { struct regmap *syscon_pol; u32 syscon_pol_offset; - - /* DISPC_CONTROL & DISPC_CONFIG lock*/ - spinlock_t control_lock; }; enum omap_color_component { @@ -368,25 +366,17 @@ static inline u32 dispc_read_reg(struct dispc_device *dispc, u16 idx) static u32 mgr_fld_read(struct dispc_device *dispc, enum omap_channel channel, enum mgr_reg_fields regfld) { - const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld]; + const struct dispc_reg_field *rfld = &mgr_desc[channel].reg_desc[regfld]; - return REG_GET(dispc, rfld.reg, rfld.high, rfld.low); + return REG_GET(dispc, rfld->reg, rfld->high, rfld->low); } static void mgr_fld_write(struct dispc_device *dispc, enum omap_channel channel, enum mgr_reg_fields regfld, int val) { - const struct dispc_reg_field rfld = mgr_desc[channel].reg_desc[regfld]; - const bool need_lock = rfld.reg == DISPC_CONTROL || rfld.reg == DISPC_CONFIG; - unsigned long flags; + const struct dispc_reg_field *rfld = &mgr_desc[channel].reg_desc[regfld]; - if (need_lock) { - spin_lock_irqsave(&dispc->control_lock, flags); - REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low); - spin_unlock_irqrestore(&dispc->control_lock, flags); - } else { - REG_FLD_MOD(dispc, rfld.reg, val, rfld.high, rfld.low); - } + REG_FLD_MOD(dispc, rfld->reg, val, rfld->high, rfld->low); } static int dispc_get_num_ovls(struct dispc_device *dispc) @@ -2510,6 +2500,19 @@ static int dispc_ovl_calc_scaling(struct dispc_device *dispc, if (width == out_width && height == out_height) return 0; + if (dispc->feat->supported_scaler_color_modes) { + const u32 *modes = dispc->feat->supported_scaler_color_modes; + unsigned int i; + + for (i = 0; modes[i]; ++i) { + if (modes[i] == fourcc) + break; + } + + if (modes[i] == 0) + return -EINVAL; + } + if (plane == OMAP_DSS_WB) { switch (fourcc) { case DRM_FORMAT_NV12: @@ -4225,6 +4228,12 @@ static const u32 *omap4_dispc_supported_color_modes[] = { DRM_FORMAT_RGBX8888), }; +static const u32 omap3_dispc_supported_scaler_color_modes[] = { + 
DRM_FORMAT_XRGB8888, DRM_FORMAT_RGB565, DRM_FORMAT_YUYV, + DRM_FORMAT_UYVY, + 0, +}; + static const struct dispc_features omap24xx_dispc_feats = { .sw_start = 5, .fp_start = 15, @@ -4253,6 +4262,7 @@ static const struct dispc_features omap24xx_dispc_feats = { .num_reg_fields = ARRAY_SIZE(omap2_dispc_reg_fields), .overlay_caps = omap2_dispc_overlay_caps, .supported_color_modes = omap2_dispc_supported_color_modes, + .supported_scaler_color_modes = COLOR_ARRAY(DRM_FORMAT_XRGB8888), .num_mgrs = 2, .num_ovls = 3, .buffer_size_unit = 1, @@ -4287,6 +4297,7 @@ static const struct dispc_features omap34xx_rev1_0_dispc_feats = { .num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields), .overlay_caps = omap3430_dispc_overlay_caps, .supported_color_modes = omap3_dispc_supported_color_modes, + .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes, .num_mgrs = 2, .num_ovls = 3, .buffer_size_unit = 1, @@ -4321,6 +4332,7 @@ static const struct dispc_features omap34xx_rev3_0_dispc_feats = { .num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields), .overlay_caps = omap3430_dispc_overlay_caps, .supported_color_modes = omap3_dispc_supported_color_modes, + .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes, .num_mgrs = 2, .num_ovls = 3, .buffer_size_unit = 1, @@ -4355,6 +4367,7 @@ static const struct dispc_features omap36xx_dispc_feats = { .num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields), .overlay_caps = omap3630_dispc_overlay_caps, .supported_color_modes = omap3_dispc_supported_color_modes, + .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes, .num_mgrs = 2, .num_ovls = 3, .buffer_size_unit = 1, @@ -4389,6 +4402,7 @@ static const struct dispc_features am43xx_dispc_feats = { .num_reg_fields = ARRAY_SIZE(omap3_dispc_reg_fields), .overlay_caps = omap3430_dispc_overlay_caps, .supported_color_modes = omap3_dispc_supported_color_modes, + .supported_scaler_color_modes = omap3_dispc_supported_scaler_color_modes, .num_mgrs = 1, .num_ovls = 3, .buffer_size_unit = 1, @@ -4768,8 +4782,6 @@ static int dispc_bind(struct device *dev, struct device *master, void *data) platform_set_drvdata(pdev, dispc); dispc->dss = dss; - spin_lock_init(&dispc->control_lock); - /* * The OMAP3-based models can't be told apart using the compatible * string, use SoC device matching. 
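
The dispc.c hunks above gate scaling on a new per-SoC allow-list: supported_scaler_color_modes is a zero-terminated array of fourcc codes, and dispc_ovl_calc_scaling() now walks it and returns -EINVAL when the framebuffer format is not listed, while a NULL pointer keeps the old "no restriction" behaviour (only the OMAP2/3 and AM43xx feature structs gain the field here). A minimal standalone sketch of that check, with hypothetical names and two hard-coded fourcc values standing in for the feature table:

#include <stdbool.h>
#include <stdint.h>

/* Zero-terminated allow-list of fourcc codes the (hypothetical) scaler accepts. */
static const uint32_t scaler_formats[] = {
	0x34325258,	/* DRM_FORMAT_XRGB8888 ('XR24') */
	0x36314752,	/* DRM_FORMAT_RGB565   ('RG16') */
	0,
};

/* Walk the list until the 0 terminator; only listed formats may be scaled. */
static bool scaler_supports_format(const uint32_t *modes, uint32_t fourcc)
{
	unsigned int i;

	for (i = 0; modes[i]; ++i) {
		if (modes[i] == fourcc)
			return true;
	}

	return false;
}
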
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index b30fcaa2d0f5..da16ea095f13 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -3548,7 +3548,7 @@ static int dsi_proto_config(struct dsi_data *dsi) static void dsi_proto_timings(struct dsi_data *dsi) { - unsigned int tlpx, tclk_zero, tclk_prepare, tclk_trail; + unsigned int tlpx, tclk_zero, tclk_prepare; unsigned int tclk_pre, tclk_post; unsigned int ths_prepare, ths_prepare_ths_zero, ths_zero; unsigned int ths_trail, ths_exit; @@ -3567,7 +3567,6 @@ static void dsi_proto_timings(struct dsi_data *dsi) r = dsi_read_reg(dsi, DSI_DSIPHY_CFG1); tlpx = FLD_GET(r, 20, 16) * 2; - tclk_trail = FLD_GET(r, 15, 8); tclk_zero = FLD_GET(r, 7, 0); r = dsi_read_reg(dsi, DSI_DSIPHY_CFG2); diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c index 4bdd63b57100..225ec808b01a 100644 --- a/drivers/gpu/drm/omapdrm/dss/dss.c +++ b/drivers/gpu/drm/omapdrm/dss/dss.c @@ -1598,3 +1598,40 @@ struct platform_driver omap_dsshw_driver = { .suppress_bind_attrs = true, }, }; + +/* INIT */ +static struct platform_driver * const omap_dss_drivers[] = { + &omap_dsshw_driver, + &omap_dispchw_driver, +#ifdef CONFIG_OMAP2_DSS_DSI + &omap_dsihw_driver, +#endif +#ifdef CONFIG_OMAP2_DSS_VENC + &omap_venchw_driver, +#endif +#ifdef CONFIG_OMAP4_DSS_HDMI + &omapdss_hdmi4hw_driver, +#endif +#ifdef CONFIG_OMAP5_DSS_HDMI + &omapdss_hdmi5hw_driver, +#endif +}; + +static int __init omap_dss_init(void) +{ + return platform_register_drivers(omap_dss_drivers, + ARRAY_SIZE(omap_dss_drivers)); +} + +static void __exit omap_dss_exit(void) +{ + platform_unregister_drivers(omap_dss_drivers, + ARRAY_SIZE(omap_dss_drivers)); +} + +module_init(omap_dss_init); +module_exit(omap_dss_exit); + +MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@ti.com>"); +MODULE_DESCRIPTION("OMAP2/3/4/5 Display Subsystem"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c index 5d5d5588ebc1..ea5d5c228534 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c @@ -542,8 +542,9 @@ static void hdmi_core_audio_config(struct hdmi_core_data *core, } /* Set ACR clock divisor */ - REG_FLD_MOD(av_base, - HDMI_CORE_AV_FREQ_SVAL, cfg->mclk_mode, 2, 0); + if (cfg->use_mclk) + REG_FLD_MOD(av_base, HDMI_CORE_AV_FREQ_SVAL, + cfg->mclk_mode, 2, 0); r = hdmi_read_reg(av_base, HDMI_CORE_AV_ACR_CTRL); /* @@ -675,7 +676,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, struct hdmi_audio_format audio_format; struct hdmi_audio_dma audio_dma; struct hdmi_core_audio_config acore; - int err, n, cts, channel_count; + int n, cts, channel_count; unsigned int fs_nr; bool word_length_16b = false; @@ -737,7 +738,7 @@ int hdmi4_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, return -EINVAL; } - err = hdmi_compute_acr(pclk, fs_nr, &n, &cts); + hdmi_compute_acr(pclk, fs_nr, &n, &cts); /* Audio clock regeneration settings */ acore.n = n; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c index 7400fb99d453..ff4d35c8771f 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c @@ -23,24 +23,12 @@ #include "hdmi5_core.h" -/* only 24 bit color depth used for now */ -static const struct csc_table csc_table_deepcolor[] = { - /* HDMI_DEEP_COLOR_24BIT */ - [0] = { 7036, 0, 0, 32, 0, 7036, 0, 32, 0, 0, 
7036, 32, }, - /* HDMI_DEEP_COLOR_30BIT */ - [1] = { 7015, 0, 0, 128, 0, 7015, 0, 128, 0, 0, 7015, 128, }, - /* HDMI_DEEP_COLOR_36BIT */ - [2] = { 7010, 0, 0, 512, 0, 7010, 0, 512, 0, 0, 7010, 512, }, - /* FULL RANGE */ - [3] = { 8192, 0, 0, 0, 0, 8192, 0, 0, 0, 0, 8192, 0, }, -}; - static void hdmi_core_ddc_init(struct hdmi_core_data *core) { void __iomem *base = core->base; const unsigned long long iclk = 266000000; /* DSS L3 ICLK */ - const unsigned int ss_scl_high = 4600; /* ns */ - const unsigned int ss_scl_low = 5400; /* ns */ + const unsigned int ss_scl_high = 4700; /* ns */ + const unsigned int ss_scl_low = 5500; /* ns */ const unsigned int fs_scl_high = 600; /* ns */ const unsigned int fs_scl_low = 1300; /* ns */ const unsigned int sda_hold = 1000; /* ns */ @@ -397,14 +385,6 @@ static void hdmi_core_config_video_packetizer(struct hdmi_core_data *core) REG_FLD_MOD(base, HDMI_CORE_VP_CONF, clr_depth ? 0 : 2, 1, 0); } -static void hdmi_core_config_csc(struct hdmi_core_data *core) -{ - int clr_depth = 0; /* 24 bit color depth */ - - /* CSC_COLORDEPTH */ - REG_FLD_MOD(core->base, HDMI_CORE_CSC_SCALE, clr_depth, 7, 4); -} - static void hdmi_core_config_video_sampler(struct hdmi_core_data *core) { int video_mapping = 1; /* for 24 bit color depth */ @@ -469,47 +449,67 @@ static void hdmi_core_write_avi_infoframe(struct hdmi_core_data *core, REG_FLD_MOD(base, HDMI_CORE_FC_PRCONF, pr, 3, 0); } -static void hdmi_core_csc_config(struct hdmi_core_data *core, - struct csc_table csc_coeff) +static void hdmi_core_write_csc(struct hdmi_core_data *core, + const struct csc_table *csc_coeff) { void __iomem *base = core->base; - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_MSB, csc_coeff.a1 >> 8 , 6, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_LSB, csc_coeff.a1, 7, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_MSB, csc_coeff.a2 >> 8, 6, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_LSB, csc_coeff.a2, 7, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_MSB, csc_coeff.a3 >> 8, 6, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_LSB, csc_coeff.a3, 7, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_MSB, csc_coeff.a4 >> 8, 6, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_LSB, csc_coeff.a4, 7, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_MSB, csc_coeff.b1 >> 8, 6, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_LSB, csc_coeff.b1, 7, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_MSB, csc_coeff.b2 >> 8, 6, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_LSB, csc_coeff.b2, 7, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_MSB, csc_coeff.b3 >> 8, 6, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_LSB, csc_coeff.b3, 7, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_MSB, csc_coeff.b4 >> 8, 6, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_LSB, csc_coeff.b4, 7, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_MSB, csc_coeff.c1 >> 8, 6, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_LSB, csc_coeff.c1, 7, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_MSB, csc_coeff.c2 >> 8, 6, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_LSB, csc_coeff.c2, 7, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_MSB, csc_coeff.c3 >> 8, 6, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_LSB, csc_coeff.c3, 7, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_MSB, csc_coeff.c4 >> 8, 6, 0); - REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_LSB, csc_coeff.c4, 7, 0); - + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_MSB, csc_coeff->a1 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A1_LSB, csc_coeff->a1, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_MSB, csc_coeff->a2 >> 8, 6, 0); 
+ REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A2_LSB, csc_coeff->a2, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_MSB, csc_coeff->a3 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A3_LSB, csc_coeff->a3, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_MSB, csc_coeff->a4 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_A4_LSB, csc_coeff->a4, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_MSB, csc_coeff->b1 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B1_LSB, csc_coeff->b1, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_MSB, csc_coeff->b2 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B2_LSB, csc_coeff->b2, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_MSB, csc_coeff->b3 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B3_LSB, csc_coeff->b3, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_MSB, csc_coeff->b4 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_B4_LSB, csc_coeff->b4, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_MSB, csc_coeff->c1 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C1_LSB, csc_coeff->c1, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_MSB, csc_coeff->c2 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C2_LSB, csc_coeff->c2, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_MSB, csc_coeff->c3 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C3_LSB, csc_coeff->c3, 7, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_MSB, csc_coeff->c4 >> 8, 6, 0); + REG_FLD_MOD(base, HDMI_CORE_CSC_COEF_C4_LSB, csc_coeff->c4, 7, 0); + + /* enable CSC */ REG_FLD_MOD(base, HDMI_CORE_MC_FLOWCTRL, 0x1, 0, 0); } -static void hdmi_core_configure_range(struct hdmi_core_data *core) +static void hdmi_core_configure_range(struct hdmi_core_data *core, + enum hdmi_quantization_range range) { - struct csc_table csc_coeff = { 0 }; + static const struct csc_table csc_limited_range = { + 7036, 0, 0, 32, 0, 7036, 0, 32, 0, 0, 7036, 32 + }; + static const struct csc_table csc_full_range = { + 8192, 0, 0, 0, 0, 8192, 0, 0, 0, 0, 8192, 0 + }; + const struct csc_table *csc_coeff; + + /* CSC_COLORDEPTH = 24 bits*/ + REG_FLD_MOD(core->base, HDMI_CORE_CSC_SCALE, 0, 7, 4); + + switch (range) { + case HDMI_QUANTIZATION_RANGE_FULL: + csc_coeff = &csc_full_range; + break; - /* support limited range with 24 bit color depth for now */ - csc_coeff = csc_table_deepcolor[0]; + case HDMI_QUANTIZATION_RANGE_DEFAULT: + case HDMI_QUANTIZATION_RANGE_LIMITED: + default: + csc_coeff = &csc_limited_range; + break; + } - hdmi_core_csc_config(core, csc_coeff); + hdmi_core_write_csc(core, csc_coeff); } static void hdmi_core_enable_video_path(struct hdmi_core_data *core) @@ -600,9 +600,20 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp, struct videomode vm; struct hdmi_video_format video_format; struct hdmi_core_vid_config v_core_cfg; + enum hdmi_quantization_range range; hdmi_core_mask_interrupts(core); + if (cfg->hdmi_dvi_mode == HDMI_HDMI) { + char vic = cfg->infoframe.video_code; + + /* All CEA modes other than VIC 1 use limited quantization range. */ + range = vic > 1 ? 
HDMI_QUANTIZATION_RANGE_LIMITED : + HDMI_QUANTIZATION_RANGE_FULL; + } else { + range = HDMI_QUANTIZATION_RANGE_FULL; + } + hdmi_core_init(&v_core_cfg, cfg); hdmi_wp_init_vid_fmt_timings(&video_format, &vm, cfg); @@ -616,9 +627,8 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp, hdmi_wp_video_config_interface(wp, &vm); - /* support limited range with 24 bit color depth for now */ - hdmi_core_configure_range(core); - cfg->infoframe.quantization_range = HDMI_QUANTIZATION_RANGE_LIMITED; + hdmi_core_configure_range(core, range); + cfg->infoframe.quantization_range = range; /* * configure core video part, set software reset in the core @@ -628,7 +638,6 @@ void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp, hdmi_core_video_config(core, &v_core_cfg); hdmi_core_config_video_packetizer(core); - hdmi_core_config_csc(core); hdmi_core_config_video_sampler(core); if (cfg->hdmi_dvi_mode == HDMI_HDMI) @@ -798,7 +807,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, struct hdmi_audio_format audio_format; struct hdmi_audio_dma audio_dma; struct hdmi_core_audio_config core_cfg; - int err, n, cts, channel_count; + int n, cts, channel_count; unsigned int fs_nr; bool word_length_16b = false; @@ -841,7 +850,7 @@ int hdmi5_audio_config(struct hdmi_core_data *core, struct hdmi_wp_data *wp, return -EINVAL; } - err = hdmi_compute_acr(pclk, fs_nr, &n, &cts); + hdmi_compute_acr(pclk, fs_nr, &n, &cts); core_cfg.n = n; core_cfg.cts = cts; diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c index 14b41de44ebc..0693d34fca1b 100644 --- a/drivers/gpu/drm/omapdrm/dss/output.c +++ b/drivers/gpu/drm/omapdrm/dss/output.c @@ -12,6 +12,7 @@ #include <linux/of.h> #include <linux/of_graph.h> +#include <drm/drm_bridge.h> #include <drm/drm_panel.h> #include "dss.h" diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h index 835e6654fa82..43c1d096b021 100644 --- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h +++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.h @@ -113,7 +113,7 @@ extern struct platform_driver omap_dmm_driver; /* GEM bo flags -> tiler fmt */ static inline enum tiler_fmt gem2fmt(u32 flags) { - switch (flags & OMAP_BO_TILED) { + switch (flags & OMAP_BO_TILED_MASK) { case OMAP_BO_TILED_8: return TILFMT_8BIT; case OMAP_BO_TILED_16: diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 2983c003698e..b3e22c890c51 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -11,6 +11,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> #include <drm/drm_file.h> diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c index 6fe14111cd95..24bbe9f2a32e 100644 --- a/drivers/gpu/drm/omapdrm/omap_encoder.c +++ b/drivers/gpu/drm/omapdrm/omap_encoder.c @@ -6,6 +6,7 @@ #include <linux/list.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_edid.h> diff --git a/drivers/gpu/drm/omapdrm/omap_fb.c b/drivers/gpu/drm/omapdrm/omap_fb.c index 1b8b5108caf8..9aeab81dfb90 100644 --- a/drivers/gpu/drm/omapdrm/omap_fb.c +++ b/drivers/gpu/drm/omapdrm/omap_fb.c @@ -95,7 +95,7 @@ static u32 get_linear_addr(struct drm_framebuffer *fb, bool omap_framebuffer_supports_rotation(struct drm_framebuffer *fb) { - return 
omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED; + return omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK; } /* Note: DRM rotates counter-clockwise, TILER & DSS rotates clockwise */ @@ -135,7 +135,6 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, { struct omap_framebuffer *omap_fb = to_omap_framebuffer(fb); const struct drm_format_info *format = omap_fb->format; - struct plane *plane = &omap_fb->planes[0]; u32 x, y, orient = 0; info->fourcc = fb->format->format; @@ -154,7 +153,7 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, x = state->src_x >> 16; y = state->src_y >> 16; - if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED) { + if (omap_gem_flags(fb->obj[0]) & OMAP_BO_TILED_MASK) { u32 w = state->src_w >> 16; u32 h = state->src_h >> 16; @@ -209,10 +208,8 @@ void omap_framebuffer_update_scanout(struct drm_framebuffer *fb, info->screen_width /= format->cpp[0]; if (fb->format->format == DRM_FORMAT_NV12) { - plane = &omap_fb->planes[1]; - if (info->rotation_type == OMAP_DSS_ROT_TILER) { - WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED)); + WARN_ON(!(omap_gem_flags(fb->obj[1]) & OMAP_BO_TILED_MASK)); omap_gem_rotated_dma_addr(fb->obj[1], orient, x/2, y/2, &info->p_uv_addr); } else { diff --git a/drivers/gpu/drm/omapdrm/omap_gem.c b/drivers/gpu/drm/omapdrm/omap_gem.c index 08f539efddfb..e518d93ca6df 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem.c +++ b/drivers/gpu/drm/omapdrm/omap_gem.c @@ -67,7 +67,7 @@ struct omap_gem_object { /** * # of users of dma_addr */ - u32 dma_addr_cnt; + refcount_t dma_addr_cnt; /** * If the buffer has been imported from a dmabuf the OMAP_DB_DMABUF flag @@ -196,7 +196,7 @@ static void omap_gem_evict(struct drm_gem_object *obj) struct omap_gem_object *omap_obj = to_omap_bo(obj); struct omap_drm_private *priv = obj->dev->dev_private; - if (omap_obj->flags & OMAP_BO_TILED) { + if (omap_obj->flags & OMAP_BO_TILED_MASK) { enum tiler_fmt fmt = gem2fmt(omap_obj->flags); int i; @@ -324,7 +324,7 @@ size_t omap_gem_mmap_size(struct drm_gem_object *obj) struct omap_gem_object *omap_obj = to_omap_bo(obj); size_t size = obj->size; - if (omap_obj->flags & OMAP_BO_TILED) { + if (omap_obj->flags & OMAP_BO_TILED_MASK) { /* for tiled buffers, the virtual size has stride rounded up * to 4kb.. (to hide the fact that row n+1 might start 16kb or * 32kb later!). But we don't back the entire buffer with @@ -513,7 +513,7 @@ vm_fault_t omap_gem_fault(struct vm_fault *vmf) * probably trigger put_pages()? 
*/ - if (omap_obj->flags & OMAP_BO_TILED) + if (omap_obj->flags & OMAP_BO_TILED_MASK) ret = omap_gem_fault_2d(obj, vma, vmf); else ret = omap_gem_fault_1d(obj, vma, vmf); @@ -773,18 +773,20 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) mutex_lock(&omap_obj->lock); if (!omap_gem_is_contiguous(omap_obj) && priv->has_dmm) { - if (omap_obj->dma_addr_cnt == 0) { + if (refcount_read(&omap_obj->dma_addr_cnt) == 0) { u32 npages = obj->size >> PAGE_SHIFT; enum tiler_fmt fmt = gem2fmt(omap_obj->flags); struct tiler_block *block; BUG_ON(omap_obj->block); + refcount_set(&omap_obj->dma_addr_cnt, 1); + ret = omap_gem_attach_pages(obj); if (ret) goto fail; - if (omap_obj->flags & OMAP_BO_TILED) { + if (omap_obj->flags & OMAP_BO_TILED_MASK) { block = tiler_reserve_2d(fmt, omap_obj->width, omap_obj->height, 0); @@ -813,13 +815,15 @@ int omap_gem_pin(struct drm_gem_object *obj, dma_addr_t *dma_addr) omap_obj->block = block; DBG("got dma address: %pad", &omap_obj->dma_addr); + } else { + refcount_inc(&omap_obj->dma_addr_cnt); } - omap_obj->dma_addr_cnt++; - - *dma_addr = omap_obj->dma_addr; + if (dma_addr) + *dma_addr = omap_obj->dma_addr; } else if (omap_gem_is_contiguous(omap_obj)) { - *dma_addr = omap_obj->dma_addr; + if (dma_addr) + *dma_addr = omap_obj->dma_addr; } else { ret = -EINVAL; goto fail; @@ -832,38 +836,46 @@ fail: } /** + * omap_gem_unpin_locked() - Unpin a GEM object from memory + * @obj: the GEM object + * + * omap_gem_unpin() without locking. + */ +static void omap_gem_unpin_locked(struct drm_gem_object *obj) +{ + struct omap_gem_object *omap_obj = to_omap_bo(obj); + int ret; + + if (refcount_dec_and_test(&omap_obj->dma_addr_cnt)) { + ret = tiler_unpin(omap_obj->block); + if (ret) { + dev_err(obj->dev->dev, + "could not unpin pages: %d\n", ret); + } + ret = tiler_release(omap_obj->block); + if (ret) { + dev_err(obj->dev->dev, + "could not release unmap: %d\n", ret); + } + omap_obj->dma_addr = 0; + omap_obj->block = NULL; + } +} + +/** * omap_gem_unpin() - Unpin a GEM object from memory * @obj: the GEM object * * Unpin the given GEM object previously pinned with omap_gem_pin(). Pins are - * reference-counted, the actualy unpin will only be performed when the number + * reference-counted, the actual unpin will only be performed when the number * of calls to this function matches the number of calls to omap_gem_pin(). 
*/ void omap_gem_unpin(struct drm_gem_object *obj) { struct omap_gem_object *omap_obj = to_omap_bo(obj); - int ret; mutex_lock(&omap_obj->lock); - - if (omap_obj->dma_addr_cnt > 0) { - omap_obj->dma_addr_cnt--; - if (omap_obj->dma_addr_cnt == 0) { - ret = tiler_unpin(omap_obj->block); - if (ret) { - dev_err(obj->dev->dev, - "could not unpin pages: %d\n", ret); - } - ret = tiler_release(omap_obj->block); - if (ret) { - dev_err(obj->dev->dev, - "could not release unmap: %d\n", ret); - } - omap_obj->dma_addr = 0; - omap_obj->block = NULL; - } - } - + omap_gem_unpin_locked(obj); mutex_unlock(&omap_obj->lock); } @@ -879,8 +891,8 @@ int omap_gem_rotated_dma_addr(struct drm_gem_object *obj, u32 orient, mutex_lock(&omap_obj->lock); - if ((omap_obj->dma_addr_cnt > 0) && omap_obj->block && - (omap_obj->flags & OMAP_BO_TILED)) { + if ((refcount_read(&omap_obj->dma_addr_cnt) > 0) && omap_obj->block && + (omap_obj->flags & OMAP_BO_TILED_MASK)) { *dma_addr = tiler_tsptr(omap_obj->block, orient, x, y); ret = 0; } @@ -895,7 +907,7 @@ int omap_gem_tiled_stride(struct drm_gem_object *obj, u32 orient) { struct omap_gem_object *omap_obj = to_omap_bo(obj); int ret = -EINVAL; - if (omap_obj->flags & OMAP_BO_TILED) + if (omap_obj->flags & OMAP_BO_TILED_MASK) ret = tiler_stride(gem2fmt(omap_obj->flags), orient); return ret; } @@ -1030,10 +1042,11 @@ void omap_gem_describe(struct drm_gem_object *obj, struct seq_file *m) seq_printf(m, "%08x: %2d (%2d) %08llx %pad (%2d) %p %4d", omap_obj->flags, obj->name, kref_read(&obj->refcount), - off, &omap_obj->dma_addr, omap_obj->dma_addr_cnt, + off, &omap_obj->dma_addr, + refcount_read(&omap_obj->dma_addr_cnt), omap_obj->vaddr, omap_obj->roll); - if (omap_obj->flags & OMAP_BO_TILED) { + if (omap_obj->flags & OMAP_BO_TILED_MASK) { seq_printf(m, " %dx%d", omap_obj->width, omap_obj->height); if (omap_obj->block) { struct tcm_area *area = &omap_obj->block->area; @@ -1093,7 +1106,7 @@ void omap_gem_free_object(struct drm_gem_object *obj) mutex_lock(&omap_obj->lock); /* The object should not be pinned. */ - WARN_ON(omap_obj->dma_addr_cnt > 0); + WARN_ON(refcount_read(&omap_obj->dma_addr_cnt) > 0); if (omap_obj->pages) { if (omap_obj->flags & OMAP_BO_MEM_DMABUF) @@ -1120,6 +1133,38 @@ void omap_gem_free_object(struct drm_gem_object *obj) kfree(omap_obj); } +static bool omap_gem_validate_flags(struct drm_device *dev, u32 flags) +{ + struct omap_drm_private *priv = dev->dev_private; + + switch (flags & OMAP_BO_CACHE_MASK) { + case OMAP_BO_CACHED: + case OMAP_BO_WC: + case OMAP_BO_CACHE_MASK: + break; + + default: + return false; + } + + if (flags & OMAP_BO_TILED_MASK) { + if (!priv->usergart) + return false; + + switch (flags & OMAP_BO_TILED_MASK) { + case OMAP_BO_TILED_8: + case OMAP_BO_TILED_16: + case OMAP_BO_TILED_32: + break; + + default: + return false; + } + } + + return true; +} + /* GEM buffer object constructor */ struct drm_gem_object *omap_gem_new(struct drm_device *dev, union omap_gem_size gsize, u32 flags) @@ -1131,18 +1176,15 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, size_t size; int ret; - /* Validate the flags and compute the memory and cache flags. */ - if (flags & OMAP_BO_TILED) { - if (!priv->usergart) { - dev_err(dev->dev, "Tiled buffers require DMM\n"); - return NULL; - } + if (!omap_gem_validate_flags(dev, flags)) + return NULL; + /* Validate the flags and compute the memory and cache flags. */ + if (flags & OMAP_BO_TILED_MASK) { /* * Tiled buffers are always shmem paged backed. When they are * scanned out, they are remapped into DMM/TILER. 
*/ - flags &= ~OMAP_BO_SCANOUT; flags |= OMAP_BO_MEM_SHMEM; /* @@ -1153,9 +1195,8 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, flags |= tiler_get_cpu_cache_flags(); } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) { /* - * OMAP_BO_SCANOUT hints that the buffer doesn't need to be - * tiled. However, to lower the pressure on memory allocation, - * use contiguous memory only if no TILER is available. + * If we don't have DMM, we must allocate scanout buffers + * from contiguous DMA memory. */ flags |= OMAP_BO_MEM_DMA_API; } else if (!(flags & OMAP_BO_MEM_DMABUF)) { @@ -1174,7 +1215,7 @@ struct drm_gem_object *omap_gem_new(struct drm_device *dev, omap_obj->flags = flags; mutex_init(&omap_obj->lock); - if (flags & OMAP_BO_TILED) { + if (flags & OMAP_BO_TILED_MASK) { /* * For tiled buffers align dimensions to slot boundaries and * calculate size based on aligned dimensions. diff --git a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c index e8c3ae7ac77e..7344bb61936c 100644 --- a/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c +++ b/drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c @@ -67,7 +67,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer, { struct drm_gem_object *obj = buffer->priv; struct page **pages; - if (omap_gem_flags(obj) & OMAP_BO_TILED) { + if (omap_gem_flags(obj) & OMAP_BO_TILED_MASK) { /* TODO we would need to pin at least part of the buffer to * get de-tiled view. For now just reject it. */ diff --git a/drivers/gpu/drm/panel/panel-arm-versatile.c b/drivers/gpu/drm/panel/panel-arm-versatile.c index 5f72c922a04b..a0574dc03e16 100644 --- a/drivers/gpu/drm/panel/panel-arm-versatile.c +++ b/drivers/gpu/drm/panel/panel-arm-versatile.c @@ -350,9 +350,8 @@ static int versatile_panel_probe(struct platform_device *pdev) dev_info(dev, "panel mounted on IB2 daughterboard\n"); } - drm_panel_init(&vpanel->panel); - vpanel->panel.dev = dev; - vpanel->panel.funcs = &versatile_panel_drm_funcs; + drm_panel_init(&vpanel->panel, dev, &versatile_panel_drm_funcs, + DRM_MODE_CONNECTOR_DPI); return drm_panel_add(&vpanel->panel); } diff --git a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c index dabf59e0f56f..98f184b81187 100644 --- a/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c +++ b/drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c @@ -204,9 +204,8 @@ static int feiyang_dsi_probe(struct mipi_dsi_device *dsi) mipi_dsi_set_drvdata(dsi, ctx); ctx->dsi = dsi; - drm_panel_init(&ctx->panel); - ctx->panel.dev = &dsi->dev; - ctx->panel.funcs = &feiyang_funcs; + drm_panel_init(&ctx->panel, &dsi->dev, &feiyang_funcs, + DRM_MODE_CONNECTOR_DSI); ctx->dvdd = devm_regulator_get(&dsi->dev, "dvdd"); if (IS_ERR(ctx->dvdd)) { diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c index 3c58f63adbf7..24955bec1958 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c +++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c @@ -895,9 +895,8 @@ static int ili9322_probe(struct spi_device *spi) ili->input = ili->conf->input; } - drm_panel_init(&ili->panel); - ili->panel.dev = dev; - ili->panel.funcs = &ili9322_drm_funcs; + drm_panel_init(&ili->panel, dev, &ili9322_drm_funcs, + DRM_MODE_CONNECTOR_DPI); return drm_panel_add(&ili->panel); } diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c index 3ad4a46c4e94..e8789e460a16 100644 --- a/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c 
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9881c.c @@ -433,9 +433,8 @@ static int ili9881c_dsi_probe(struct mipi_dsi_device *dsi) mipi_dsi_set_drvdata(dsi, ctx); ctx->dsi = dsi; - drm_panel_init(&ctx->panel); - ctx->panel.dev = &dsi->dev; - ctx->panel.funcs = &ili9881c_funcs; + drm_panel_init(&ctx->panel, &dsi->dev, &ili9881c_funcs, + DRM_MODE_CONNECTOR_DSI); ctx->power = devm_regulator_get(&dsi->dev, "power"); if (IS_ERR(ctx->power)) { diff --git a/drivers/gpu/drm/panel/panel-innolux-p079zca.c b/drivers/gpu/drm/panel/panel-innolux-p079zca.c index d92d1c98878c..83df1ac4211f 100644 --- a/drivers/gpu/drm/panel/panel-innolux-p079zca.c +++ b/drivers/gpu/drm/panel/panel-innolux-p079zca.c @@ -487,9 +487,8 @@ static int innolux_panel_add(struct mipi_dsi_device *dsi, if (IS_ERR(innolux->backlight)) return PTR_ERR(innolux->backlight); - drm_panel_init(&innolux->base); - innolux->base.funcs = &innolux_panel_funcs; - innolux->base.dev = dev; + drm_panel_init(&innolux->base, dev, &innolux_panel_funcs, + DRM_MODE_CONNECTOR_DSI); err = drm_panel_add(&innolux->base); if (err < 0) diff --git a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c index ff3e89e61e3f..56364a93f0b8 100644 --- a/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c +++ b/drivers/gpu/drm/panel/panel-jdi-lt070me05000.c @@ -437,9 +437,8 @@ static int jdi_panel_add(struct jdi_panel *jdi) return ret; } - drm_panel_init(&jdi->base); - jdi->base.funcs = &jdi_panel_funcs; - jdi->base.dev = &jdi->dsi->dev; + drm_panel_init(&jdi->base, &jdi->dsi->dev, &jdi_panel_funcs, + DRM_MODE_CONNECTOR_DSI); ret = drm_panel_add(&jdi->base); diff --git a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c index 3ac04eb8d0fe..45f96556ec8c 100644 --- a/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c +++ b/drivers/gpu/drm/panel/panel-kingdisplay-kd097d04.c @@ -391,9 +391,8 @@ static int kingdisplay_panel_add(struct kingdisplay_panel *kingdisplay) if (IS_ERR(kingdisplay->backlight)) return PTR_ERR(kingdisplay->backlight); - drm_panel_init(&kingdisplay->base); - kingdisplay->base.funcs = &kingdisplay_panel_funcs; - kingdisplay->base.dev = &kingdisplay->link->dev; + drm_panel_init(&kingdisplay->base, &kingdisplay->link->dev, + &kingdisplay_panel_funcs, DRM_MODE_CONNECTOR_DSI); return drm_panel_add(&kingdisplay->base); } diff --git a/drivers/gpu/drm/panel/panel-lg-lb035q02.c b/drivers/gpu/drm/panel/panel-lg-lb035q02.c index ee4379729a5b..7a1385e834f0 100644 --- a/drivers/gpu/drm/panel/panel-lg-lb035q02.c +++ b/drivers/gpu/drm/panel/panel-lg-lb035q02.c @@ -196,9 +196,8 @@ static int lb035q02_probe(struct spi_device *spi) if (ret < 0) return ret; - drm_panel_init(&lcd->panel); - lcd->panel.dev = &lcd->spi->dev; - lcd->panel.funcs = &lb035q02_funcs; + drm_panel_init(&lcd->panel, &lcd->spi->dev, &lb035q02_funcs, + DRM_MODE_CONNECTOR_DPI); return drm_panel_add(&lcd->panel); } diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c index 41bf02d122a1..db4865a4c2b9 100644 --- a/drivers/gpu/drm/panel/panel-lg-lg4573.c +++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c @@ -259,9 +259,8 @@ static int lg4573_probe(struct spi_device *spi) return ret; } - drm_panel_init(&ctx->panel); - ctx->panel.dev = &spi->dev; - ctx->panel.funcs = &lg4573_drm_funcs; + drm_panel_init(&ctx->panel, &spi->dev, &lg4573_drm_funcs, + DRM_MODE_CONNECTOR_DPI); return drm_panel_add(&ctx->panel); } diff --git a/drivers/gpu/drm/panel/panel-lvds.c 
b/drivers/gpu/drm/panel/panel-lvds.c index ad47cc95459e..2405f26e5d31 100644 --- a/drivers/gpu/drm/panel/panel-lvds.c +++ b/drivers/gpu/drm/panel/panel-lvds.c @@ -197,7 +197,6 @@ static int panel_lvds_parse_dt(struct panel_lvds *lvds) static int panel_lvds_probe(struct platform_device *pdev) { struct panel_lvds *lvds; - struct device_node *np; int ret; lvds = devm_kzalloc(&pdev->dev, sizeof(*lvds), GFP_KERNEL); @@ -243,14 +242,9 @@ static int panel_lvds_probe(struct platform_device *pdev) return ret; } - np = of_parse_phandle(lvds->dev->of_node, "backlight", 0); - if (np) { - lvds->backlight = of_find_backlight_by_node(np); - of_node_put(np); - - if (!lvds->backlight) - return -EPROBE_DEFER; - } + lvds->backlight = devm_of_find_backlight(lvds->dev); + if (IS_ERR(lvds->backlight)) + return PTR_ERR(lvds->backlight); /* * TODO: Handle all power supplies specified in the DT node in a generic @@ -260,20 +254,15 @@ static int panel_lvds_probe(struct platform_device *pdev) */ /* Register the panel. */ - drm_panel_init(&lvds->panel); - lvds->panel.dev = lvds->dev; - lvds->panel.funcs = &panel_lvds_funcs; + drm_panel_init(&lvds->panel, lvds->dev, &panel_lvds_funcs, + DRM_MODE_CONNECTOR_LVDS); ret = drm_panel_add(&lvds->panel); if (ret < 0) - goto error; + return ret; dev_set_drvdata(lvds->dev, lvds); return 0; - -error: - put_device(&lvds->backlight->dev); - return ret; } static int panel_lvds_remove(struct platform_device *pdev) @@ -284,9 +273,6 @@ static int panel_lvds_remove(struct platform_device *pdev) panel_lvds_disable(&lvds->panel); - if (lvds->backlight) - put_device(&lvds->backlight->dev); - return 0; } diff --git a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c index 20f17e46e65d..fd593532ab23 100644 --- a/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c +++ b/drivers/gpu/drm/panel/panel-nec-nl8048hl11.c @@ -205,9 +205,8 @@ static int nl8048_probe(struct spi_device *spi) if (ret < 0) return ret; - drm_panel_init(&lcd->panel); - lcd->panel.dev = &lcd->spi->dev; - lcd->panel.funcs = &nl8048_funcs; + drm_panel_init(&lcd->panel, &lcd->spi->dev, &nl8048_funcs, + DRM_MODE_CONNECTOR_DPI); return drm_panel_add(&lcd->panel); } diff --git a/drivers/gpu/drm/panel/panel-novatek-nt39016.c b/drivers/gpu/drm/panel/panel-novatek-nt39016.c index 2ad1063b068d..60ccedce530c 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt39016.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt39016.c @@ -292,9 +292,8 @@ static int nt39016_probe(struct spi_device *spi) return err; } - drm_panel_init(&panel->drm_panel); - panel->drm_panel.dev = dev; - panel->drm_panel.funcs = &nt39016_funcs; + drm_panel_init(&panel->drm_panel, dev, &nt39016_funcs, + DRM_MODE_CONNECTOR_DPI); err = drm_panel_add(&panel->drm_panel); if (err < 0) { diff --git a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c index 2bae1db3ff34..f2a72ee6ee07 100644 --- a/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c +++ b/drivers/gpu/drm/panel/panel-olimex-lcd-olinuxino.c @@ -288,9 +288,8 @@ static int lcd_olinuxino_probe(struct i2c_client *client, if (IS_ERR(lcd->backlight)) return PTR_ERR(lcd->backlight); - drm_panel_init(&lcd->panel); - lcd->panel.dev = dev; - lcd->panel.funcs = &lcd_olinuxino_funcs; + drm_panel_init(&lcd->panel, dev, &lcd_olinuxino_funcs, + DRM_MODE_CONNECTOR_DPI); return drm_panel_add(&lcd->panel); } diff --git a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c index 
c7b48df8869a..bf1f928b215f 100644 --- a/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c +++ b/drivers/gpu/drm/panel/panel-orisetech-otm8009a.c @@ -455,9 +455,8 @@ static int otm8009a_probe(struct mipi_dsi_device *dsi) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM; - drm_panel_init(&ctx->panel); - ctx->panel.dev = dev; - ctx->panel.funcs = &otm8009a_drm_funcs; + drm_panel_init(&ctx->panel, dev, &otm8009a_drm_funcs, + DRM_MODE_CONNECTOR_DSI); ctx->bl_dev = devm_backlight_device_register(dev, dev_name(dev), dsi->host->dev, ctx, diff --git a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c index e0e20ecff916..2b40913899d8 100644 --- a/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c +++ b/drivers/gpu/drm/panel/panel-osd-osd101t2587-53ts.c @@ -166,9 +166,8 @@ static int osd101t2587_panel_add(struct osd101t2587_panel *osd101t2587) if (IS_ERR(osd101t2587->backlight)) return PTR_ERR(osd101t2587->backlight); - drm_panel_init(&osd101t2587->base); - osd101t2587->base.funcs = &osd101t2587_panel_funcs; - osd101t2587->base.dev = &osd101t2587->dsi->dev; + drm_panel_init(&osd101t2587->base, &osd101t2587->dsi->dev, + &osd101t2587_panel_funcs, DRM_MODE_CONNECTOR_DSI); return drm_panel_add(&osd101t2587->base); } diff --git a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c index 3dff0b3f73c2..664605071d34 100644 --- a/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c +++ b/drivers/gpu/drm/panel/panel-panasonic-vvx10f034n00.c @@ -223,9 +223,8 @@ static int wuxga_nt_panel_add(struct wuxga_nt_panel *wuxga_nt) return -EPROBE_DEFER; } - drm_panel_init(&wuxga_nt->base); - wuxga_nt->base.funcs = &wuxga_nt_panel_funcs; - wuxga_nt->base.dev = &wuxga_nt->dsi->dev; + drm_panel_init(&wuxga_nt->base, &wuxga_nt->dsi->dev, + &wuxga_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI); ret = drm_panel_add(&wuxga_nt->base); if (ret < 0) diff --git a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c index b5b14aa059ea..09824e92fc78 100644 --- a/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c +++ b/drivers/gpu/drm/panel/panel-raspberrypi-touchscreen.c @@ -426,8 +426,8 @@ static int rpi_touchscreen_probe(struct i2c_client *i2c, return PTR_ERR(ts->dsi); } - ts->base.dev = dev; - ts->base.funcs = &rpi_touchscreen_funcs; + drm_panel_init(&ts->base, dev, &rpi_touchscreen_funcs, + DRM_MODE_CONNECTOR_DSI); /* This appears last, as it's what will unblock the DSI host * driver's component bind function. 
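
Looking back at the omap_gem.c hunks earlier in this section: the plain u32 dma_addr_cnt becomes a refcount_t and the teardown moves into omap_gem_unpin_locked(), so the mapping is built on the 0->1 transition and torn down when refcount_dec_and_test() reports the last user. A minimal sketch of that pattern under an object mutex; pinned_buf, buf_map_hw and buf_unmap_hw are made-up stand-ins for the GEM object and the tiler_pin()/tiler_unpin() work, and error unwinding is omitted:

#include <linux/mutex.h>
#include <linux/refcount.h>

struct pinned_buf {
	struct mutex lock;	/* serialises the 0<->1 transitions, like omap_obj->lock */
	refcount_t pin_cnt;
};

static int buf_map_hw(struct pinned_buf *buf)    { return 0; }	/* stand-in for the real mapping */
static void buf_unmap_hw(struct pinned_buf *buf) { }		/* stand-in for the real teardown */

static int buf_pin(struct pinned_buf *buf)
{
	int ret = 0;

	mutex_lock(&buf->lock);
	if (refcount_read(&buf->pin_cnt) == 0) {
		/* first user: take the reference, then build the mapping */
		refcount_set(&buf->pin_cnt, 1);
		ret = buf_map_hw(buf);
	} else {
		refcount_inc(&buf->pin_cnt);
	}
	mutex_unlock(&buf->lock);

	return ret;
}

static void buf_unpin(struct pinned_buf *buf)
{
	mutex_lock(&buf->lock);
	/* the last unpin tears the mapping down again */
	if (refcount_dec_and_test(&buf->pin_cnt))
		buf_unmap_hw(buf);
	mutex_unlock(&buf->lock);
}

The refcount_set()/refcount_inc() split mirrors the patch: refcount_inc() warns on an increment from zero, so the first pin has to use refcount_set() explicitly.
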
diff --git a/drivers/gpu/drm/panel/panel-raydium-rm67191.c b/drivers/gpu/drm/panel/panel-raydium-rm67191.c index 6a5d37006103..fd67fc6185c4 100644 --- a/drivers/gpu/drm/panel/panel-raydium-rm67191.c +++ b/drivers/gpu/drm/panel/panel-raydium-rm67191.c @@ -606,9 +606,8 @@ static int rad_panel_probe(struct mipi_dsi_device *dsi) if (ret) return ret; - drm_panel_init(&panel->panel); - panel->panel.funcs = &rad_panel_funcs; - panel->panel.dev = dev; + drm_panel_init(&panel->panel, dev, &rad_panel_funcs, + DRM_MODE_CONNECTOR_DSI); dev_set_drvdata(dev, panel); ret = drm_panel_add(&panel->panel); diff --git a/drivers/gpu/drm/panel/panel-raydium-rm68200.c b/drivers/gpu/drm/panel/panel-raydium-rm68200.c index ba889625ad43..994e855721f4 100644 --- a/drivers/gpu/drm/panel/panel-raydium-rm68200.c +++ b/drivers/gpu/drm/panel/panel-raydium-rm68200.c @@ -404,9 +404,8 @@ static int rm68200_probe(struct mipi_dsi_device *dsi) dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST | MIPI_DSI_MODE_LPM; - drm_panel_init(&ctx->panel); - ctx->panel.dev = dev; - ctx->panel.funcs = &rm68200_drm_funcs; + drm_panel_init(&ctx->panel, dev, &rm68200_drm_funcs, + DRM_MODE_CONNECTOR_DSI); drm_panel_add(&ctx->panel); diff --git a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c index b9109922397f..31234b79d3b1 100644 --- a/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c +++ b/drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c @@ -343,9 +343,8 @@ static int jh057n_probe(struct mipi_dsi_device *dsi) return ret; } - drm_panel_init(&ctx->panel); - ctx->panel.dev = dev; - ctx->panel.funcs = &jh057n_drm_funcs; + drm_panel_init(&ctx->panel, dev, &jh057n_drm_funcs, + DRM_MODE_CONNECTOR_DSI); drm_panel_add(&ctx->panel); diff --git a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c index 3c15764f0c03..170a5cda21b9 100644 --- a/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c +++ b/drivers/gpu/drm/panel/panel-ronbo-rb070d30.c @@ -173,9 +173,8 @@ static int rb070d30_panel_dsi_probe(struct mipi_dsi_device *dsi) mipi_dsi_set_drvdata(dsi, ctx); ctx->dsi = dsi; - drm_panel_init(&ctx->panel); - ctx->panel.dev = &dsi->dev; - ctx->panel.funcs = &rb070d30_panel_funcs; + drm_panel_init(&ctx->panel, &dsi->dev, &rb070d30_panel_funcs, + DRM_MODE_CONNECTOR_DSI); ctx->gpios.reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ctx->gpios.reset)) { diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c index 3be902dcedc0..250809ba37c7 100644 --- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c +++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c @@ -351,9 +351,8 @@ static int ld9040_probe(struct spi_device *spi) return ret; } - drm_panel_init(&ctx->panel); - ctx->panel.dev = dev; - ctx->panel.funcs = &ld9040_drm_funcs; + drm_panel_init(&ctx->panel, dev, &ld9040_drm_funcs, + DRM_MODE_CONNECTOR_DPI); return drm_panel_add(&ctx->panel); } diff --git a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c index f75bef24e050..e3a0397e953e 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6d16d0.c @@ -215,9 +215,8 @@ static int s6d16d0_probe(struct mipi_dsi_device *dsi) return ret; } - drm_panel_init(&s6->panel); - s6->panel.dev = dev; - s6->panel.funcs = &s6d16d0_drm_funcs; + drm_panel_init(&s6->panel, dev, &s6d16d0_drm_funcs, + DRM_MODE_CONNECTOR_DSI); ret = drm_panel_add(&s6->panel); if (ret < 0) 
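
The panel conversions in this section follow two recurring ideas: drm_panel_init() now takes the device, the funcs table and the connector type in a single call, and panel-lvds.c swaps its hand-rolled backlight phandle lookup for devm_of_find_backlight(), which defers probe until the backlight driver is bound and drops the reference automatically on unbind. A sketch of a probe using both, with mypanel/mypanel_funcs as made-up placeholder names:

#include <linux/backlight.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <drm/drm_connector.h>
#include <drm/drm_panel.h>

struct mypanel {
	struct drm_panel panel;
	struct backlight_device *backlight;
};

static const struct drm_panel_funcs mypanel_funcs = {
	/* .prepare, .enable, .get_modes, ... as usual */
};

static int mypanel_probe(struct platform_device *pdev)
{
	struct mypanel *p;

	p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	/*
	 * NULL if the OF node has no "backlight" phandle,
	 * ERR_PTR(-EPROBE_DEFER) if it has one but the backlight driver
	 * is not bound yet; the reference is dropped on driver detach.
	 */
	p->backlight = devm_of_find_backlight(&pdev->dev);
	if (IS_ERR(p->backlight))
		return PTR_ERR(p->backlight);

	/* dev, funcs and connector type in a single call now */
	drm_panel_init(&p->panel, &pdev->dev, &mypanel_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	platform_set_drvdata(pdev, p);

	return drm_panel_add(&p->panel);
}

The connector type recorded here is what panel-simple now also keeps per panel_desc, presumably so consumers can create a correctly typed connector instead of DRM_MODE_CONNECTOR_Unknown.
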
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c index b923de23ed65..938ab72c5540 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e3ha2.c @@ -732,9 +732,8 @@ static int s6e3ha2_probe(struct mipi_dsi_device *dsi) ctx->bl_dev->props.brightness = S6E3HA2_DEFAULT_BRIGHTNESS; ctx->bl_dev->props.power = FB_BLANK_POWERDOWN; - drm_panel_init(&ctx->panel); - ctx->panel.dev = dev; - ctx->panel.funcs = &s6e3ha2_drm_funcs; + drm_panel_init(&ctx->panel, dev, &s6e3ha2_drm_funcs, + DRM_MODE_CONNECTOR_DSI); ret = drm_panel_add(&ctx->panel); if (ret < 0) diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c index cd90fa700c49..a60635e9226d 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63j0x03.c @@ -466,9 +466,8 @@ static int s6e63j0x03_probe(struct mipi_dsi_device *dsi) return PTR_ERR(ctx->reset_gpio); } - drm_panel_init(&ctx->panel); - ctx->panel.dev = dev; - ctx->panel.funcs = &s6e63j0x03_funcs; + drm_panel_init(&ctx->panel, dev, &s6e63j0x03_funcs, + DRM_MODE_CONNECTOR_DSI); ctx->bl_dev = backlight_device_register("s6e63j0x03", dev, ctx, &s6e63j0x03_bl_ops, NULL); diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c index 142d395ea512..ba01af0b14fd 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e63m0.c @@ -473,9 +473,8 @@ static int s6e63m0_probe(struct spi_device *spi) return ret; } - drm_panel_init(&ctx->panel); - ctx->panel.dev = dev; - ctx->panel.funcs = &s6e63m0_drm_funcs; + drm_panel_init(&ctx->panel, dev, &s6e63m0_drm_funcs, + DRM_MODE_CONNECTOR_DPI); ret = s6e63m0_backlight_register(ctx); if (ret < 0) diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c index 81858267723a..dbced6501204 100644 --- a/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c +++ b/drivers/gpu/drm/panel/panel-samsung-s6e8aa0.c @@ -1017,9 +1017,8 @@ static int s6e8aa0_probe(struct mipi_dsi_device *dsi) ctx->brightness = GAMMA_LEVEL_NUM - 1; - drm_panel_init(&ctx->panel); - ctx->panel.dev = dev; - ctx->panel.funcs = &s6e8aa0_drm_funcs; + drm_panel_init(&ctx->panel, dev, &s6e8aa0_drm_funcs, + DRM_MODE_CONNECTOR_DSI); ret = drm_panel_add(&ctx->panel); if (ret < 0) diff --git a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c index 18b22b1294fb..b3619ba443bd 100644 --- a/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c +++ b/drivers/gpu/drm/panel/panel-seiko-43wvf1g.c @@ -274,9 +274,8 @@ static int seiko_panel_probe(struct device *dev, return -EPROBE_DEFER; } - drm_panel_init(&panel->base); - panel->base.dev = dev; - panel->base.funcs = &seiko_panel_funcs; + drm_panel_init(&panel->base, dev, &seiko_panel_funcs, + DRM_MODE_CONNECTOR_DPI); err = drm_panel_add(&panel->base); if (err < 0) diff --git a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c index e910b4ad1310..5e136c3ba185 100644 --- a/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c +++ b/drivers/gpu/drm/panel/panel-sharp-lq101r1sx01.c @@ -329,9 +329,8 @@ static int sharp_panel_add(struct sharp_panel *sharp) if (IS_ERR(sharp->backlight)) return PTR_ERR(sharp->backlight); - drm_panel_init(&sharp->base); - sharp->base.funcs = &sharp_panel_funcs; - sharp->base.dev = &sharp->link1->dev; + 
drm_panel_init(&sharp->base, &sharp->link1->dev, &sharp_panel_funcs, + DRM_MODE_CONNECTOR_DSI); return drm_panel_add(&sharp->base); } diff --git a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c index 46cd9a250129..eeab7998c7de 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls037v7dw01.c @@ -185,9 +185,8 @@ static int ls037v7dw01_probe(struct platform_device *pdev) return PTR_ERR(lcd->ud_gpio); } - drm_panel_init(&lcd->panel); - lcd->panel.dev = &pdev->dev; - lcd->panel.funcs = &ls037v7dw01_funcs; + drm_panel_init(&lcd->panel, &pdev->dev, &ls037v7dw01_funcs, + DRM_MODE_CONNECTOR_DPI); return drm_panel_add(&lcd->panel); } diff --git a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c index c39abde9f9f1..b963ba4ab589 100644 --- a/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c +++ b/drivers/gpu/drm/panel/panel-sharp-ls043t1le01.c @@ -264,9 +264,8 @@ static int sharp_nt_panel_add(struct sharp_nt_panel *sharp_nt) if (IS_ERR(sharp_nt->backlight)) return PTR_ERR(sharp_nt->backlight); - drm_panel_init(&sharp_nt->base); - sharp_nt->base.funcs = &sharp_nt_panel_funcs; - sharp_nt->base.dev = &sharp_nt->dsi->dev; + drm_panel_init(&sharp_nt->base, &sharp_nt->dsi->dev, + &sharp_nt_panel_funcs, DRM_MODE_CONNECTOR_DSI); return drm_panel_add(&sharp_nt->base); } diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 28fa6ba7b767..5d487686d25c 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -94,6 +94,7 @@ struct panel_desc { u32 bus_format; u32 bus_flags; + int connector_type; }; struct panel_simple { @@ -464,9 +465,8 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc) if (!of_get_display_timing(dev->of_node, "panel-timing", &dt)) panel_simple_parse_panel_timing_node(dev, panel, &dt); - drm_panel_init(&panel->base); - panel->base.dev = dev; - panel->base.funcs = &panel_simple_funcs; + drm_panel_init(&panel->base, dev, &panel_simple_funcs, + desc->connector_type); err = drm_panel_add(&panel->base); if (err < 0) @@ -833,6 +833,7 @@ static const struct panel_desc auo_g133han01 = { .unprepare = 1000, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct display_timing auo_g185han01_timings = { @@ -862,6 +863,7 @@ static const struct panel_desc auo_g185han01 = { .unprepare = 1000, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct display_timing auo_p320hvn03_timings = { @@ -890,6 +892,7 @@ static const struct panel_desc auo_p320hvn03 = { .unprepare = 500, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct drm_display_mode auo_t215hvn01_mode = { @@ -1205,6 +1208,7 @@ static const struct panel_desc dlc_dlc0700yzg_1 = { .disable = 200, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct display_timing dlc_dlc1010gig_timing = { @@ -1235,6 +1239,7 @@ static const struct panel_desc dlc_dlc1010gig = { .unprepare = 60, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct drm_display_mode edt_et035012dm6_mode = { @@ -1501,6 +1506,7 @@ static const struct panel_desc hannstar_hsd070pww1 = { .height = 94, }, .bus_format = 
MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct display_timing hannstar_hsd100pxn1_timing = { @@ -1525,6 +1531,7 @@ static const struct panel_desc hannstar_hsd100pxn1 = { .height = 152, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct drm_display_mode hitachi_tx23d38vm0caa_mode = { @@ -1631,6 +1638,7 @@ static const struct panel_desc innolux_g070y2_l01 = { .unprepare = 800, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct display_timing innolux_g101ice_l01_timing = { @@ -1659,6 +1667,7 @@ static const struct panel_desc innolux_g101ice_l01 = { .disable = 200, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct display_timing innolux_g121i1_l01_timing = { @@ -1686,6 +1695,7 @@ static const struct panel_desc innolux_g121i1_l01 = { .disable = 20, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct drm_display_mode innolux_g121x1_l03_mode = { @@ -1869,6 +1879,7 @@ static const struct panel_desc koe_tx31d200vm0baa = { .height = 109, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct display_timing kyo_tcg121xglp_timing = { @@ -1893,6 +1904,7 @@ static const struct panel_desc kyo_tcg121xglp = { .height = 184, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct drm_display_mode lemaker_bl035_rgb_002_mode = { @@ -1941,6 +1953,7 @@ static const struct panel_desc lg_lb070wv8 = { .height = 91, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct drm_display_mode lg_lp079qx1_sp0v_mode = { @@ -2063,6 +2076,7 @@ static const struct panel_desc mitsubishi_aa070mc01 = { .disable = 400, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, .bus_flags = DRM_BUS_FLAG_DE_HIGH, }; @@ -2091,6 +2105,7 @@ static const struct panel_desc nec_nl12880bc20_05 = { .disable = 50, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct drm_display_mode nec_nl4827hc19_05b_mode = { @@ -2193,6 +2208,7 @@ static const struct panel_desc nlt_nl192108ac18_02d = { .unprepare = 500, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct drm_display_mode nvd_9128_mode = { @@ -2216,6 +2232,7 @@ static const struct panel_desc nvd_9128 = { .height = 88, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct display_timing okaya_rs800480t_7x0gp_timing = { @@ -2381,6 +2398,7 @@ static const struct panel_desc osddisplays_osd070t1718_19ts = { }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, + .connector_type = DRM_MODE_CONNECTOR_DPI, }; static const struct drm_display_mode pda_91_00156_a0_mode = { @@ -2628,6 +2646,7 @@ static const struct panel_desc sharp_lq101k1ly04 = { .height = 136, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct display_timing sharp_lq123p1jx31_timing = { @@ -2807,6 +2826,7 @@ static const struct panel_desc tianma_tm070jdhg30 = { .height = 95, }, 
.bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct display_timing tianma_tm070rvhg71_timing = { @@ -2831,6 +2851,7 @@ static const struct panel_desc tianma_tm070rvhg71 = { .height = 86, }, .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct drm_display_mode ti_nspire_cx_lcd_mode[] = { @@ -2913,6 +2934,7 @@ static const struct panel_desc toshiba_lt089ac29000 = { }, .bus_format = MEDIA_BUS_FMT_RGB888_1X24, .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct drm_display_mode tpk_f07a_0102_mode = { @@ -2983,6 +3005,7 @@ static const struct panel_desc urt_umsh_8596md_lvds = { .height = 91, }, .bus_format = MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, + .connector_type = DRM_MODE_CONNECTOR_LVDS, }; static const struct panel_desc urt_umsh_8596md_parallel = { diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7701.c b/drivers/gpu/drm/panel/panel-sitronix-st7701.c index 09c5d9a6f9fa..ee3f23f45755 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7701.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7701.c @@ -369,7 +369,8 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi) if (IS_ERR(st7701->backlight)) return PTR_ERR(st7701->backlight); - drm_panel_init(&st7701->panel); + drm_panel_init(&st7701->panel, &dsi->dev, &st7701_funcs, + DRM_MODE_CONNECTOR_DSI); /** * Once sleep out has been issued, ST7701 IC required to wait 120ms @@ -381,8 +382,6 @@ static int st7701_dsi_probe(struct mipi_dsi_device *dsi) * ts8550b and there is no valid documentation for that. */ st7701->sleep_delay = 120 + desc->panel_sleep_delay; - st7701->panel.funcs = &st7701_funcs; - st7701->panel.dev = &dsi->dev; ret = drm_panel_add(&st7701->panel); if (ret < 0) diff --git a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c index 5e3e92ea9ea6..108a85bb6667 100644 --- a/drivers/gpu/drm/panel/panel-sitronix-st7789v.c +++ b/drivers/gpu/drm/panel/panel-sitronix-st7789v.c @@ -381,8 +381,8 @@ static int st7789v_probe(struct spi_device *spi) spi_set_drvdata(spi, ctx); ctx->spi = spi; - ctx->panel.dev = &spi->dev; - ctx->panel.funcs = &st7789v_drm_funcs; + drm_panel_init(&ctx->panel, &spi->dev, &st7789v_drm_funcs, + DRM_MODE_CONNECTOR_DPI); ctx->power = devm_regulator_get(&spi->dev, "power"); if (IS_ERR(ctx->power)) diff --git a/drivers/gpu/drm/panel/panel-sony-acx565akm.c b/drivers/gpu/drm/panel/panel-sony-acx565akm.c index 3d5b9c4f68d9..d6387d8f88a3 100644 --- a/drivers/gpu/drm/panel/panel-sony-acx565akm.c +++ b/drivers/gpu/drm/panel/panel-sony-acx565akm.c @@ -648,9 +648,8 @@ static int acx565akm_probe(struct spi_device *spi) return ret; } - drm_panel_init(&lcd->panel); - lcd->panel.dev = &lcd->spi->dev; - lcd->panel.funcs = &acx565akm_funcs; + drm_panel_init(&lcd->panel, &lcd->spi->dev, &acx565akm_funcs, + DRM_MODE_CONNECTOR_DPI); ret = drm_panel_add(&lcd->panel); if (ret < 0) { diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c index f2baff827f50..c44d6a65c0aa 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c @@ -347,9 +347,8 @@ static int td028ttec1_probe(struct spi_device *spi) return ret; } - drm_panel_init(&lcd->panel); - lcd->panel.dev = &lcd->spi->dev; - lcd->panel.funcs = &td028ttec1_funcs; + drm_panel_init(&lcd->panel, &lcd->spi->dev, &td028ttec1_funcs, + 
DRM_MODE_CONNECTOR_DPI); return drm_panel_add(&lcd->panel); } diff --git a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c index ba163c779084..621b65feec07 100644 --- a/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c +++ b/drivers/gpu/drm/panel/panel-tpo-td043mtea1.c @@ -458,9 +458,8 @@ static int td043mtea1_probe(struct spi_device *spi) return ret; } - drm_panel_init(&lcd->panel); - lcd->panel.dev = &lcd->spi->dev; - lcd->panel.funcs = &td043mtea1_funcs; + drm_panel_init(&lcd->panel, &lcd->spi->dev, &td043mtea1_funcs, + DRM_MODE_CONNECTOR_DPI); ret = drm_panel_add(&lcd->panel); if (ret < 0) { diff --git a/drivers/gpu/drm/panel/panel-tpo-tpg110.c b/drivers/gpu/drm/panel/panel-tpo-tpg110.c index 71591e5f5938..1a5418ae2ccf 100644 --- a/drivers/gpu/drm/panel/panel-tpo-tpg110.c +++ b/drivers/gpu/drm/panel/panel-tpo-tpg110.c @@ -457,9 +457,8 @@ static int tpg110_probe(struct spi_device *spi) if (ret) return ret; - drm_panel_init(&tpg->panel); - tpg->panel.dev = dev; - tpg->panel.funcs = &tpg110_drm_funcs; + drm_panel_init(&tpg->panel, dev, &tpg110_drm_funcs, + DRM_MODE_CONNECTOR_DPI); spi_set_drvdata(spi, tpg); return drm_panel_add(&tpg->panel); diff --git a/drivers/gpu/drm/panel/panel-truly-nt35597.c b/drivers/gpu/drm/panel/panel-truly-nt35597.c index 77e1311b7c69..0feea2456e14 100644 --- a/drivers/gpu/drm/panel/panel-truly-nt35597.c +++ b/drivers/gpu/drm/panel/panel-truly-nt35597.c @@ -518,9 +518,8 @@ static int truly_nt35597_panel_add(struct truly_nt35597 *ctx) /* dual port */ gpiod_set_value(ctx->mode_gpio, 0); - drm_panel_init(&ctx->panel); - ctx->panel.dev = dev; - ctx->panel.funcs = &truly_nt35597_drm_funcs; + drm_panel_init(&ctx->panel, dev, &truly_nt35597_drm_funcs, + DRM_MODE_CONNECTOR_DSI); drm_panel_add(&ctx->panel); return 0; diff --git a/drivers/gpu/drm/panfrost/TODO b/drivers/gpu/drm/panfrost/TODO index 536a0d4f8d29..8c811a9e683b 100644 --- a/drivers/gpu/drm/panfrost/TODO +++ b/drivers/gpu/drm/panfrost/TODO @@ -10,3 +10,5 @@ - Compute job support. So called 'compute only' jobs need to be plumbed up to userspace. + +- Support core dump on job failure diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.c b/drivers/gpu/drm/panfrost/panfrost_devfreq.c index 12ff77dacc95..4c4e8a30a1ac 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.c +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.c @@ -13,97 +13,42 @@ #include "panfrost_gpu.h" #include "panfrost_regs.h" -static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot); +static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev); static int panfrost_devfreq_target(struct device *dev, unsigned long *freq, u32 flags) { - struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev)); - struct dev_pm_opp *opp; - unsigned long old_clk_rate = pfdev->devfreq.cur_freq; - unsigned long target_volt, target_rate; + struct panfrost_device *pfdev = dev_get_drvdata(dev); int err; - opp = devfreq_recommended_opp(dev, freq, flags); - if (IS_ERR(opp)) - return PTR_ERR(opp); - - target_rate = dev_pm_opp_get_freq(opp); - target_volt = dev_pm_opp_get_voltage(opp); - dev_pm_opp_put(opp); - - if (old_clk_rate == target_rate) - return 0; - - /* - * If frequency scaling from low to high, adjust voltage first. - * If frequency scaling from high to low, adjust frequency first. 
- */ - if (old_clk_rate < target_rate) { - err = regulator_set_voltage(pfdev->regulator, target_volt, - target_volt); - if (err) { - dev_err(dev, "Cannot set voltage %lu uV\n", - target_volt); - return err; - } - } - - err = clk_set_rate(pfdev->clock, target_rate); - if (err) { - dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate, - err); - regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt, - pfdev->devfreq.cur_volt); + err = dev_pm_opp_set_rate(dev, *freq); + if (err) return err; - } - if (old_clk_rate > target_rate) { - err = regulator_set_voltage(pfdev->regulator, target_volt, - target_volt); - if (err) - dev_err(dev, "Cannot set voltage %lu uV\n", target_volt); - } - - pfdev->devfreq.cur_freq = target_rate; - pfdev->devfreq.cur_volt = target_volt; + *freq = clk_get_rate(pfdev->clock); return 0; } static void panfrost_devfreq_reset(struct panfrost_device *pfdev) { - ktime_t now = ktime_get(); - int i; - - for (i = 0; i < NUM_JOB_SLOTS; i++) { - pfdev->devfreq.slot[i].busy_time = 0; - pfdev->devfreq.slot[i].idle_time = 0; - pfdev->devfreq.slot[i].time_last_update = now; - } + pfdev->devfreq.busy_time = 0; + pfdev->devfreq.idle_time = 0; + pfdev->devfreq.time_last_update = ktime_get(); } static int panfrost_devfreq_get_dev_status(struct device *dev, struct devfreq_dev_status *status) { - struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev)); - int i; + struct panfrost_device *pfdev = dev_get_drvdata(dev); - for (i = 0; i < NUM_JOB_SLOTS; i++) { - panfrost_devfreq_update_utilization(pfdev, i); - } + panfrost_devfreq_update_utilization(pfdev); status->current_frequency = clk_get_rate(pfdev->clock); - status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time, - pfdev->devfreq.slot[0].idle_time)); + status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.busy_time, + pfdev->devfreq.idle_time)); - status->busy_time = 0; - for (i = 0; i < NUM_JOB_SLOTS; i++) { - status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time); - } - - /* We're scheduling only to one core atm, so don't divide for now */ - /* status->busy_time /= NUM_JOB_SLOTS; */ + status->busy_time = ktime_to_ns(pfdev->devfreq.busy_time); panfrost_devfreq_reset(pfdev); @@ -119,7 +64,7 @@ static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq { struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev)); - *freq = pfdev->devfreq.cur_freq; + *freq = clk_get_rate(pfdev->clock); return 0; } @@ -135,6 +80,7 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) { int ret; struct dev_pm_opp *opp; + unsigned long cur_freq; ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev); if (ret == -ENODEV) /* Optional, continue without devfreq */ @@ -144,13 +90,13 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev) panfrost_devfreq_reset(pfdev); - pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock); + cur_freq = clk_get_rate(pfdev->clock); - opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0); + opp = devfreq_recommended_opp(&pfdev->pdev->dev, &cur_freq, 0); if (IS_ERR(opp)) return PTR_ERR(opp); - panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq; + panfrost_devfreq_profile.initial_freq = cur_freq; dev_pm_opp_put(opp); pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev, @@ -174,14 +120,10 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev) void panfrost_devfreq_resume(struct panfrost_device *pfdev) { - int i; - if (!pfdev->devfreq.devfreq) return; 
panfrost_devfreq_reset(pfdev); - for (i = 0; i < NUM_JOB_SLOTS; i++) - pfdev->devfreq.slot[i].busy = false; devfreq_resume_device(pfdev->devfreq.devfreq); } @@ -194,9 +136,8 @@ void panfrost_devfreq_suspend(struct panfrost_device *pfdev) devfreq_suspend_device(pfdev->devfreq.devfreq); } -static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot) +static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev) { - struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot]; ktime_t now; ktime_t last; @@ -204,22 +145,27 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i return; now = ktime_get(); - last = pfdev->devfreq.slot[slot].time_last_update; + last = pfdev->devfreq.time_last_update; - /* If we last recorded a transition to busy, we have been idle since */ - if (devfreq_slot->busy) - pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last); + if (atomic_read(&pfdev->devfreq.busy_count) > 0) + pfdev->devfreq.busy_time += ktime_sub(now, last); else - pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last); + pfdev->devfreq.idle_time += ktime_sub(now, last); - pfdev->devfreq.slot[slot].time_last_update = now; + pfdev->devfreq.time_last_update = now; +} + +void panfrost_devfreq_record_busy(struct panfrost_device *pfdev) +{ + panfrost_devfreq_update_utilization(pfdev); + atomic_inc(&pfdev->devfreq.busy_count); } -/* The job scheduler is expected to call this at every transition busy <-> idle */ -void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot) +void panfrost_devfreq_record_idle(struct panfrost_device *pfdev) { - struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot]; + int count; - panfrost_devfreq_update_utilization(pfdev, slot); - devfreq_slot->busy = !devfreq_slot->busy; + panfrost_devfreq_update_utilization(pfdev); + count = atomic_dec_if_positive(&pfdev->devfreq.busy_count); + WARN_ON(count < 0); } diff --git a/drivers/gpu/drm/panfrost/panfrost_devfreq.h b/drivers/gpu/drm/panfrost/panfrost_devfreq.h index e3bc63e82843..0611beffc8d0 100644 --- a/drivers/gpu/drm/panfrost/panfrost_devfreq.h +++ b/drivers/gpu/drm/panfrost/panfrost_devfreq.h @@ -10,6 +10,7 @@ void panfrost_devfreq_fini(struct panfrost_device *pfdev); void panfrost_devfreq_resume(struct panfrost_device *pfdev); void panfrost_devfreq_suspend(struct panfrost_device *pfdev); -void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot); +void panfrost_devfreq_record_busy(struct panfrost_device *pfdev); +void panfrost_devfreq_record_idle(struct panfrost_device *pfdev); #endif /* __PANFROST_DEVFREQ_H__ */ diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h index 9c39b9794811..06713811b92c 100644 --- a/drivers/gpu/drm/panfrost/panfrost_device.h +++ b/drivers/gpu/drm/panfrost/panfrost_device.h @@ -51,13 +51,6 @@ struct panfrost_features { unsigned long hw_issues[64 / BITS_PER_LONG]; }; -struct panfrost_devfreq_slot { - ktime_t busy_time; - ktime_t idle_time; - ktime_t time_last_update; - bool busy; -}; - struct panfrost_device { struct device *dev; struct drm_device *ddev; @@ -93,9 +86,10 @@ struct panfrost_device { struct { struct devfreq *devfreq; struct thermal_cooling_device *cooling; - unsigned long cur_freq; - unsigned long cur_volt; - struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS]; + ktime_t busy_time; + ktime_t idle_time; + ktime_t time_last_update; + atomic_t busy_count; } devfreq; }; diff --git 
a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index f21bc8a7ee3a..9458dc6c750c 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -470,7 +470,7 @@ static const struct drm_ioctl_desc panfrost_drm_driver_ioctls[] = { PANFROST_IOCTL(MADVISE, madvise, DRM_RENDER_ALLOW), }; -DEFINE_DRM_GEM_SHMEM_FOPS(panfrost_drm_driver_fops); +DEFINE_DRM_GEM_FOPS(panfrost_drm_driver_fops); /* * Panfrost driver version: diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index acb07fe06580..deca0c30bbd4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -112,7 +112,7 @@ static const struct drm_gem_object_funcs panfrost_gem_funcs = { .get_sg_table = drm_gem_shmem_get_sg_table, .vmap = drm_gem_shmem_vmap, .vunmap = drm_gem_shmem_vunmap, - .vm_ops = &drm_gem_shmem_vm_ops, + .mmap = drm_gem_shmem_mmap, }; /** diff --git a/drivers/gpu/drm/panfrost/panfrost_issues.h b/drivers/gpu/drm/panfrost/panfrost_issues.h index cec6dcdadb5c..8e59d765bf19 100644 --- a/drivers/gpu/drm/panfrost/panfrost_issues.h +++ b/drivers/gpu/drm/panfrost/panfrost_issues.h @@ -13,37 +13,118 @@ * to care about. */ enum panfrost_hw_issue { + /* Need way to guarantee that all previously-translated memory accesses + * are commited */ HW_ISSUE_6367, + + /* On job complete with non-done the cache is not flushed */ HW_ISSUE_6787, + + /* Write of PRFCNT_CONFIG_MODE_MANUAL to PRFCNT_CONFIG causes a + * instrumentation dump if PRFCNT_TILER_EN is enabled */ HW_ISSUE_8186, + + /* TIB: Reports faults from a vtile which has not yet been allocated */ HW_ISSUE_8245, + + /* uTLB deadlock could occur when writing to an invalid page at the + * same time as access to a valid page in the same uTLB cache line ( == + * 4 PTEs == 16K block of mapping) */ HW_ISSUE_8316, + + /* HT: TERMINATE for RUN command ignored if previous LOAD_DESCRIPTOR is + * still executing */ HW_ISSUE_8394, + + /* CSE: Sends a TERMINATED response for a task that should not be + * terminated */ HW_ISSUE_8401, + + /* Repeatedly Soft-stopping a job chain consisting of (Vertex Shader, + * Cache Flush, Tiler) jobs causes DATA_INVALID_FAULT on tiler job. */ HW_ISSUE_8408, + + /* Disable the Pause Buffer in the LS pipe. 
*/ HW_ISSUE_8443, + + /* Change in RMUs in use causes problems related with the core's SDC */ HW_ISSUE_8987, + + /* Compute endpoint has a 4-deep queue of tasks, meaning a soft stop + * won't complete until all 4 tasks have completed */ HW_ISSUE_9435, + + /* HT: Tiler returns TERMINATED for non-terminated command */ HW_ISSUE_9510, + + /* Occasionally the GPU will issue multiple page faults for the same + * address before the MMU page table has been read by the GPU */ HW_ISSUE_9630, + + /* RA DCD load request to SDC returns invalid load ignore causing + * colour buffer mismatch */ HW_ISSUE_10327, + + /* MMU TLB invalidation hazards */ HW_ISSUE_10649, + + /* Missing cache flush in multi core-group configuration */ HW_ISSUE_10676, + + /* Chicken bit on T72X for a hardware workaround in compiler */ HW_ISSUE_10797, + + /* Soft-stopping fragment jobs might fail with TILE_RANGE_FAULT */ HW_ISSUE_10817, + + /* Intermittent missing interrupt on job completion */ HW_ISSUE_10883, + + /* Soft-stopping fragment jobs might fail with TILE_RANGE_ERROR + * (similar to issue 10817) and can use #10817 workaround */ HW_ISSUE_10959, + + /* Soft-stopped fragment shader job can restart with out-of-bound + * restart index */ HW_ISSUE_10969, + + /* Race condition can cause tile list corruption */ HW_ISSUE_11020, + + /* Write buffer can cause tile list corruption */ HW_ISSUE_11024, + + /* Pause buffer can cause a fragment job hang */ HW_ISSUE_11035, + + /* Dynamic Core Scaling not supported due to errata */ HW_ISSUE_11056, + + /* Clear encoder state for a hard stopped fragment job which is AFBC + * encoded by soft resetting the GPU. Only for T76X r0p0, r0p1 and + * r0p1_50rel0 */ HW_ISSUE_T76X_3542, + + /* Keep tiler module clock on to prevent GPU stall */ HW_ISSUE_T76X_3953, + + /* Must ensure L2 is not transitioning when we reset. Workaround with a + * busy wait until L2 completes transition; ensure there is a maximum + * loop count as she may never complete her transition. (On chips + * without this errata, it's totally okay if L2 transitions.) 
*/ HW_ISSUE_TMIX_8463, + + /* Don't set SC_LS_ATTR_CHECK_DISABLE/SC_LS_ALLOW_ATTR_TYPES */ GPUCORE_1619, + + /* When a hard-stop follows close after a soft-stop, the completion + * code for the terminated job may be incorrectly set to STOPPED */ HW_ISSUE_TMIX_8438, + + /* "Protected mode" is buggy on Mali-G31 some Bifrost chips, so the + * kernel must fiddle with L2 caches to prevent data leakage */ HW_ISSUE_TGOX_R1_1234, + HW_ISSUE_END }; diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index 21f34d44aac2..d411eb6c8eb9 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -155,8 +155,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) } cfg = panfrost_mmu_as_get(pfdev, &job->file_priv->mmu); - - panfrost_devfreq_record_transition(pfdev, js); + panfrost_devfreq_record_busy(pfdev); job_write(pfdev, JS_HEAD_NEXT_LO(js), jc_head & 0xFFFFFFFF); job_write(pfdev, JS_HEAD_NEXT_HI(js), jc_head >> 32); @@ -404,9 +403,7 @@ static void panfrost_job_timedout(struct drm_sched_job *sched_job) } spin_unlock_irqrestore(&pfdev->js->job_lock, flags); - /* panfrost_core_dump(pfdev); */ - - panfrost_devfreq_record_transition(pfdev, js); + panfrost_devfreq_record_idle(pfdev); panfrost_device_reset(pfdev); for (i = 0; i < NUM_JOB_SLOTS; i++) @@ -469,7 +466,7 @@ static irqreturn_t panfrost_job_irq_handler(int irq, void *data) pfdev->jobs[j] = NULL; panfrost_mmu_as_put(pfdev, &job->file_priv->mmu); - panfrost_devfreq_record_transition(pfdev, j); + panfrost_devfreq_record_idle(pfdev); dma_fence_signal_locked(job->done_fence); pm_runtime_put_autosuspend(pfdev->dev); @@ -570,14 +567,14 @@ int panfrost_job_is_idle(struct panfrost_device *pfdev) struct panfrost_job_slot *js = pfdev->js; int i; + /* Check whether the hardware is idle */ + if (atomic_read(&pfdev->devfreq.busy_count)) + return false; + for (i = 0; i < NUM_JOB_SLOTS; i++) { /* If there are any jobs in the HW queue, we're not idle */ if (atomic_read(&js->queue[i].sched.hw_rq_count)) return false; - - /* Check whether the hardware is idle */ - if (pfdev->devfreq.slot[i].busy) - return false; } return true; diff --git a/drivers/gpu/drm/pl111/pl111_display.c b/drivers/gpu/drm/pl111/pl111_display.c index 024771a4083e..703ddc803c55 100644 --- a/drivers/gpu/drm/pl111/pl111_display.c +++ b/drivers/gpu/drm/pl111/pl111_display.c @@ -48,10 +48,10 @@ irqreturn_t pl111_irq(int irq, void *data) } static enum drm_mode_status -pl111_mode_valid(struct drm_crtc *crtc, +pl111_mode_valid(struct drm_simple_display_pipe *pipe, const struct drm_display_mode *mode) { - struct drm_device *drm = crtc->dev; + struct drm_device *drm = pipe->crtc.dev; struct pl111_drm_dev_private *priv = drm->dev_private; u32 cpp = priv->variant->fb_bpp / 8; u64 bw; diff --git a/drivers/gpu/drm/pl111/pl111_drv.c b/drivers/gpu/drm/pl111/pl111_drv.c index 276b53473a84..63dfcda04147 100644 --- a/drivers/gpu/drm/pl111/pl111_drv.c +++ b/drivers/gpu/drm/pl111/pl111_drv.c @@ -150,8 +150,8 @@ static int pl111_modeset_init(struct drm_device *dev) return -EPROBE_DEFER; if (panel) { - bridge = drm_panel_bridge_add(panel, - DRM_MODE_CONNECTOR_Unknown); + bridge = drm_panel_bridge_add_typed(panel, + DRM_MODE_CONNECTOR_Unknown); if (IS_ERR(bridge)) { ret = PTR_ERR(bridge); goto out_config; diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig index d0d691b31f4a..ca3f51c2a8fe 100644 --- a/drivers/gpu/drm/qxl/Kconfig +++ b/drivers/gpu/drm/qxl/Kconfig @@ -4,6 +4,7 @@ config DRM_QXL 
depends on DRM && PCI && MMU select DRM_KMS_HELPER select DRM_TTM + select DRM_TTM_HELPER select CRC32 help QXL virtual GPU for Spice virtualization desktop integration. diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 265bfe9f8016..1d601f57a6ba 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -88,7 +88,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto free_dev; - ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "qxl"); + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl"); if (ret) goto disable_pci; @@ -150,15 +150,7 @@ qxl_pci_remove(struct pci_dev *pdev) drm_dev_put(dev); } -static const struct file_operations qxl_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .release = drm_release, - .unlocked_ioctl = drm_ioctl, - .poll = drm_poll, - .read = drm_read, - .mmap = qxl_mmap, -}; +DEFINE_DRM_GEM_FOPS(qxl_fops); static int qxl_drm_freeze(struct drm_device *dev) { @@ -276,16 +268,8 @@ static struct drm_driver qxl_driver = { #endif .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_pin = qxl_gem_prime_pin, - .gem_prime_unpin = qxl_gem_prime_unpin, - .gem_prime_get_sg_table = qxl_gem_prime_get_sg_table, .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, - .gem_prime_vmap = qxl_gem_prime_vmap, - .gem_prime_vunmap = qxl_gem_prime_vunmap, .gem_prime_mmap = qxl_gem_prime_mmap, - .gem_free_object_unlocked = qxl_gem_object_free, - .gem_open_object = qxl_gem_object_open, - .gem_close_object = qxl_gem_object_close, .fops = &qxl_fops, .ioctls = qxl_ioctls, .irq_handler = qxl_irq_handler, diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 9e034c5fa87d..27e45a2d6b52 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -38,6 +38,7 @@ #include <drm/drm_crtc.h> #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> +#include <drm/drm_gem_ttm_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_gem.h> #include <drm/qxl_drm.h> @@ -354,7 +355,8 @@ int qxl_mode_dumb_mmap(struct drm_file *filp, /* qxl ttm */ int qxl_ttm_init(struct qxl_device *qdev); void qxl_ttm_fini(struct qxl_device *qdev); -int qxl_mmap(struct file *filp, struct vm_area_struct *vma); +int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem); /* qxl image */ diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 548dfe6f3b26..ab72dc3476e9 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -54,9 +54,14 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) { u32 c = 0; - u32 pflag = pinned ? 
TTM_PL_FLAG_NO_EVICT : 0; + u32 pflag = 0; unsigned int i; + if (pinned) + pflag |= TTM_PL_FLAG_NO_EVICT; + if (qbo->tbo.base.size <= PAGE_SIZE) + pflag |= TTM_PL_FLAG_TOPDOWN; + qbo->placement.placement = qbo->placements; qbo->placement.busy_placement = qbo->placements; if (domain == QXL_GEM_DOMAIN_VRAM) @@ -77,6 +82,19 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) } } +static const struct drm_gem_object_funcs qxl_object_funcs = { + .free = qxl_gem_object_free, + .open = qxl_gem_object_open, + .close = qxl_gem_object_close, + .pin = qxl_gem_prime_pin, + .unpin = qxl_gem_prime_unpin, + .get_sg_table = qxl_gem_prime_get_sg_table, + .vmap = qxl_gem_prime_vmap, + .vunmap = qxl_gem_prime_vunmap, + .mmap = drm_gem_ttm_mmap, + .print_info = drm_gem_ttm_print_info, +}; + int qxl_bo_create(struct qxl_device *qdev, unsigned long size, bool kernel, bool pinned, u32 domain, struct qxl_surface *surf, @@ -100,6 +118,7 @@ int qxl_bo_create(struct qxl_device *qdev, kfree(bo); return r; } + bo->tbo.base.funcs = &qxl_object_funcs; bo->type = domain; bo->pin_count = pinned ? 1 : 0; bo->surface_id = 0; @@ -148,7 +167,6 @@ int qxl_bo_kmap(struct qxl_bo *bo, void **ptr) void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset) { - struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; void *rptr; int ret; struct io_mapping *map; @@ -160,9 +178,7 @@ void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, else goto fallback; - (void) ttm_mem_io_lock(man, false); - ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem); - ttm_mem_io_unlock(man); + ret = qxl_ttm_io_mem_reserve(bo->tbo.bdev, &bo->tbo.mem); return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset); fallback: @@ -193,17 +209,11 @@ void qxl_bo_kunmap(struct qxl_bo *bo) void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *pmap) { - struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type]; - if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) && (bo->tbo.mem.mem_type != TTM_PL_PRIV)) goto fallback; io_mapping_unmap_atomic(pmap); - - (void) ttm_mem_io_lock(man, false); - ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem); - ttm_mem_io_unlock(man); return; fallback: qxl_bo_kunmap(bo); diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index 312216caeea2..2feca734c7b1 100644 --- a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -260,7 +260,7 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr) return 0; ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, - !no_intr, NULL, true); + !no_intr, NULL); if (ret) return ret; @@ -429,7 +429,6 @@ void qxl_release_unmap(struct qxl_device *qdev, void qxl_release_fence_buffer_objects(struct qxl_release *release) { struct ttm_buffer_object *bo; - struct ttm_bo_global *glob; struct ttm_bo_device *bdev; struct ttm_validate_buffer *entry; struct qxl_device *qdev; @@ -451,18 +450,16 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release) release->id | 0xf0000000, release->base.seqno); trace_dma_fence_emit(&release->base); - glob = bdev->glob; - - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); list_for_each_entry(entry, &release->bos, head) { bo = entry->bo; dma_resv_add_shared_fence(bo->base.resv, &release->base); - ttm_bo_add_to_lru(bo); + ttm_bo_move_to_lru_tail(bo, NULL); dma_resv_unlock(bo->base.resv); } - spin_unlock(&glob->lru_lock); + 
spin_unlock(&ttm_bo_glob.lru_lock); ww_acquire_fini(&release->ticket); } diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 9b24514c75aa..16a5e903533d 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -48,47 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev) return qdev; } -static struct vm_operations_struct qxl_ttm_vm_ops; -static const struct vm_operations_struct *ttm_vm_ops; - -static vm_fault_t qxl_ttm_fault(struct vm_fault *vmf) -{ - struct ttm_buffer_object *bo; - vm_fault_t ret; - - bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data; - if (bo == NULL) - return VM_FAULT_NOPAGE; - ret = ttm_vm_ops->fault(vmf); - return ret; -} - -int qxl_mmap(struct file *filp, struct vm_area_struct *vma) -{ - int r; - struct drm_file *file_priv = filp->private_data; - struct qxl_device *qdev = file_priv->minor->dev->dev_private; - - if (qdev == NULL) { - DRM_ERROR( - "filp->private_data->minor->dev->dev_private == NULL\n"); - return -EINVAL; - } - DRM_DEBUG_DRIVER("filp->private_data = 0x%p, vma->vm_pgoff = %lx\n", - filp->private_data, vma->vm_pgoff); - - r = ttm_bo_mmap(filp, vma, &qdev->mman.bdev); - if (unlikely(r != 0)) - return r; - if (unlikely(ttm_vm_ops == NULL)) { - ttm_vm_ops = vma->vm_ops; - qxl_ttm_vm_ops = *ttm_vm_ops; - qxl_ttm_vm_ops.fault = &qxl_ttm_fault; - } - vma->vm_ops = &qxl_ttm_vm_ops; - return 0; -} - static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags) { return 0; @@ -151,16 +110,8 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, *placement = qbo->placement; } -static int qxl_verify_access(struct ttm_buffer_object *bo, struct file *filp) -{ - struct qxl_bo *qbo = to_qxl_bo(bo); - - return drm_vma_node_verify_access(&qbo->tbo.base.vma_node, - filp->private_data); -} - -static int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) +int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct qxl_device *qdev = qxl_get_qdev(bdev); @@ -310,7 +261,6 @@ static struct ttm_bo_driver qxl_bo_driver = { .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &qxl_evict_flags, .move = &qxl_bo_move, - .verify_access = &qxl_verify_access, .io_mem_reserve = &qxl_ttm_io_mem_reserve, .io_mem_free = &qxl_ttm_io_mem_free, .move_notify = &qxl_bo_move_notify, @@ -325,6 +275,7 @@ int qxl_ttm_init(struct qxl_device *qdev) r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, qdev->ddev.anon_inode->i_mapping, + qdev->ddev.vma_offset_manager, false); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); @@ -368,14 +319,11 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data) { struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_mm *mm = (struct drm_mm *)node->info_ent->data; - struct drm_device *dev = node->minor->dev; - struct qxl_device *rdev = dev->dev_private; - struct ttm_bo_global *glob = rdev->mman.bdev.glob; struct drm_printer p = drm_seq_file_printer(m); - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); drm_mm_print(mm, &p); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); return 0; } #endif diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 62eab82a64f9..acabeaf28732 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -221,9 +221,7 @@ int ci_get_temp(struct radeon_device *rdev) else 
actual_temp = temp & 0x1ff; - actual_temp = actual_temp * 1000; - - return actual_temp; + return actual_temp * 1000; } /* get temperature in millidegrees */ @@ -239,9 +237,7 @@ int kv_get_temp(struct radeon_device *rdev) else actual_temp = 0; - actual_temp = actual_temp * 1000; - - return actual_temp; + return actual_temp * 1000; } /* @@ -6969,8 +6965,8 @@ static int cik_irq_init(struct radeon_device *rdev) } /* setup interrupt control */ - /* XXX this should actually be a bus address, not an MC address. same on older asics */ - WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index e937cc01910d..033bc466a862 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3696,8 +3696,8 @@ int r600_irq_init(struct radeon_device *rdev) } /* setup interrupt control */ - /* set dummy read address to ring address */ - WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c index b9aea5776d3d..72db2b41e96d 100644 --- a/drivers/gpu/drm/radeon/radeon_audio.c +++ b/drivers/gpu/drm/radeon/radeon_audio.c @@ -367,10 +367,10 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) return; sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); - if (sad_count <= 0) { + if (sad_count < 0) DRM_ERROR("Couldn't read SADs: %d\n", sad_count); + if (sad_count <= 0) return; - } BUG_ON(!sads); if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs) diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index b684cd719612..c07427d3c199 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -249,11 +249,10 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c struct drm_encoder *encoder; const struct drm_connector_helper_funcs *connector_funcs = connector->helper_private; bool connected; - int i; best_encoder = connector_funcs->best_encoder(connector); - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { if ((encoder == best_encoder) && (status == connector_status_connected)) connected = true; else @@ -269,9 +268,8 @@ radeon_connector_update_scratch_regs(struct drm_connector *connector, enum drm_c static struct drm_encoder *radeon_find_encoder(struct drm_connector *connector, int encoder_type) { struct drm_encoder *encoder; - int i; - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { if (encoder->encoder_type == encoder_type) return encoder; } @@ -380,10 +378,9 @@ static int radeon_ddc_get_modes(struct drm_connector *connector) static struct drm_encoder *radeon_best_single_encoder(struct drm_connector *connector) { struct drm_encoder 
*encoder; - int i; /* pick the first one */ - drm_connector_for_each_possible_encoder(connector, encoder, i) + drm_connector_for_each_possible_encoder(connector, encoder) return encoder; return NULL; @@ -428,14 +425,13 @@ radeon_connector_analog_encoder_conflict_solve(struct drm_connector *connector, list_for_each_entry(conflict, &dev->mode_config.connector_list, head) { struct drm_encoder *enc; - int i; if (conflict == connector) continue; radeon_conflict = to_radeon_connector(conflict); - drm_connector_for_each_possible_encoder(conflict, enc, i) { + drm_connector_for_each_possible_encoder(conflict, enc) { /* if the IDs match */ if (enc == encoder) { if (conflict->status != connector_status_connected) @@ -1363,9 +1359,7 @@ radeon_dvi_detect(struct drm_connector *connector, bool force) /* find analog encoder */ if (radeon_connector->dac_load_detect) { - int i; - - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { if (encoder->encoder_type != DRM_MODE_ENCODER_DAC && encoder->encoder_type != DRM_MODE_ENCODER_TVDAC) continue; @@ -1443,9 +1437,8 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct drm_encoder *encoder; - int i; - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { if (radeon_connector->use_digital == true) { if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) return encoder; @@ -1460,7 +1453,7 @@ static struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector) /* then check use digitial */ /* pick the first one */ - drm_connector_for_each_possible_encoder(connector, encoder, i) + drm_connector_for_each_possible_encoder(connector, encoder) return encoder; return NULL; @@ -1603,9 +1596,8 @@ u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *conn { struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; - int i; - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { radeon_encoder = to_radeon_encoder(encoder); switch (radeon_encoder->encoder_id) { @@ -1624,10 +1616,9 @@ static bool radeon_connector_encoder_is_hbr2(struct drm_connector *connector) { struct drm_encoder *encoder; struct radeon_encoder *radeon_encoder; - int i; bool found = false; - drm_connector_for_each_possible_encoder(connector, encoder, i) { + drm_connector_for_each_possible_encoder(connector, encoder) { radeon_encoder = to_radeon_encoder(encoder); if (radeon_encoder->caps & ATOM_ENCODER_CAP_RECORD_HBR2) found = true; diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c index 2994f07fbad9..ee28f5b3785e 100644 --- a/drivers/gpu/drm/radeon/radeon_dp_mst.c +++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c @@ -233,21 +233,26 @@ drm_encoder *radeon_mst_best_encoder(struct drm_connector *connector) return &radeon_connector->mst_encoder->base; } +static int +radeon_dp_mst_detect(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + bool force) +{ + struct radeon_connector *radeon_connector = + to_radeon_connector(connector); + struct radeon_connector *master = radeon_connector->mst_port; + + return drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr, + radeon_connector->port); +} + static const struct drm_connector_helper_funcs radeon_dp_mst_connector_helper_funcs = { 
.get_modes = radeon_dp_mst_get_modes, .mode_valid = radeon_dp_mst_mode_valid, .best_encoder = radeon_mst_best_encoder, + .detect_ctx = radeon_dp_mst_detect, }; -static enum drm_connector_status -radeon_dp_mst_detect(struct drm_connector *connector, bool force) -{ - struct radeon_connector *radeon_connector = to_radeon_connector(connector); - struct radeon_connector *master = radeon_connector->mst_port; - - return drm_dp_mst_detect_port(connector, &master->mst_mgr, radeon_connector->port); -} - static void radeon_dp_mst_connector_destroy(struct drm_connector *connector) { @@ -262,7 +267,6 @@ radeon_dp_mst_connector_destroy(struct drm_connector *connector) static const struct drm_connector_funcs radeon_dp_mst_connector_funcs = { .dpms = drm_helper_connector_dpms, - .detect = radeon_dp_mst_detect, .fill_modes = drm_helper_probe_single_connector_modes, .destroy = radeon_dp_mst_connector_destroy, }; diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 4528f4dc0b2d..fd74e2611185 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -361,7 +361,7 @@ static int radeon_pci_probe(struct pci_dev *pdev, return -EPROBE_DEFER; /* Get rid of things like offb */ - ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "radeondrmfb"); + ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "radeondrmfb"); if (ret) return ret; @@ -379,10 +379,6 @@ radeon_pci_remove(struct pci_dev *pdev) static void radeon_pci_shutdown(struct pci_dev *pdev) { -#ifdef CONFIG_PPC64 - struct drm_device *ddev = pci_get_drvdata(pdev); -#endif - /* if we are running in a VM, make sure the device * torn down properly on reboot/shutdown */ @@ -390,13 +386,14 @@ radeon_pci_shutdown(struct pci_dev *pdev) radeon_pci_remove(pdev); #ifdef CONFIG_PPC64 - /* Some adapters need to be suspended before a + /* + * Some adapters need to be suspended before a * shutdown occurs in order to prevent an error * during kexec. * Make this power specific becauase it breaks * some non-power boards. 
*/ - radeon_suspend_kms(ddev, true, true, false); + radeon_suspend_kms(pci_get_drvdata(pdev), true, true, false); #endif } diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index b2b076606f54..67298a0739cb 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -566,7 +566,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev, if (!vm_bos) return; - r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL, true); + r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL); if (r) goto error_free; diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 2abe1eab471f..140d94cc080d 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -542,7 +542,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev); INIT_LIST_HEAD(&duplicates); - r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates, true); + r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates); if (unlikely(r != 0)) { return r; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index a05e10724d46..098bc9f40b98 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -794,6 +794,7 @@ int radeon_ttm_init(struct radeon_device *rdev) r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->ddev->anon_inode->i_mapping, + rdev->ddev->vma_offset_manager, dma_addressing_limited(&rdev->pdev->dev)); if (r) { DRM_ERROR("failed initializing buffer object driver(%d).\n", r); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 05894d198a79..1d8efb0eefdb 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -5997,8 +5997,8 @@ static int si_irq_init(struct radeon_device *rdev) } /* setup interrupt control */ - /* set dummy read address to ring address */ - WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8); + /* set dummy read address to dummy page address */ + WREG32(INTERRUPT_CNTL2, rdev->dummy_page.addr >> 8); interrupt_cntl = RREG32(INTERRUPT_CNTL); /* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 9c93eb4fad8b..f266c17b907a 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -131,6 +131,35 @@ static const struct rcar_du_device_info rcar_du_r8a774a1_info = { .dpll_mask = BIT(1), }; +static const struct rcar_du_device_info rcar_du_r8a774b1_info = { + .gen = 3, + .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK + | RCAR_DU_FEATURE_VSP1_SOURCE + | RCAR_DU_FEATURE_INTERLACED + | RCAR_DU_FEATURE_TVM_SYNC, + .channels_mask = BIT(3) | BIT(1) | BIT(0), + .routes = { + /* + * R8A774B1 has one RGB output, one LVDS output and one HDMI + * output. 
+ */ + [RCAR_DU_OUTPUT_DPAD0] = { + .possible_crtcs = BIT(2), + .port = 0, + }, + [RCAR_DU_OUTPUT_HDMI0] = { + .possible_crtcs = BIT(1), + .port = 1, + }, + [RCAR_DU_OUTPUT_LVDS0] = { + .possible_crtcs = BIT(0), + .port = 2, + }, + }, + .num_lvds = 1, + .dpll_mask = BIT(1), +}; + static const struct rcar_du_device_info rcar_du_r8a774c0_info = { .gen = 3, .features = RCAR_DU_FEATURE_CRTC_IRQ_CLOCK @@ -416,6 +445,7 @@ static const struct of_device_id rcar_du_of_table[] = { { .compatible = "renesas,du-r8a7745", .data = &rzg1_du_r8a7745_info }, { .compatible = "renesas,du-r8a77470", .data = &rzg1_du_r8a77470_info }, { .compatible = "renesas,du-r8a774a1", .data = &rcar_du_r8a774a1_info }, + { .compatible = "renesas,du-r8a774b1", .data = &rcar_du_r8a774b1_info }, { .compatible = "renesas,du-r8a774c0", .data = &rcar_du_r8a774c0_info }, { .compatible = "renesas,du-r8a7779", .data = &rcar_du_r8a7779_info }, { .compatible = "renesas,du-r8a7790", .data = &rcar_du_r8a7790_info }, diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c index 0f00bdfe2366..3cd83a030a04 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c @@ -9,6 +9,7 @@ #include <linux/export.h> +#include <drm/drm_bridge.h> #include <drm/drm_crtc.h> #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_panel.h> @@ -84,8 +85,8 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu, goto done; } - bridge = devm_drm_panel_bridge_add(rcdu->dev, panel, - DRM_MODE_CONNECTOR_DPI); + bridge = devm_drm_panel_bridge_add_typed(rcdu->dev, panel, + DRM_MODE_CONNECTOR_DPI); if (IS_ERR(bridge)) { ret = PTR_ERR(bridge); goto done; diff --git a/drivers/gpu/drm/rcar-du/rcar_du_kms.c b/drivers/gpu/drm/rcar-du/rcar_du_kms.c index 2dc9caee8767..0d59f390de19 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_kms.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_kms.c @@ -585,7 +585,11 @@ static int rcar_du_vsps_init(struct rcar_du_device *rcdu) vsps[j].crtcs_mask |= BIT(i); - /* Store the VSP pointer and pipe index in the CRTC. */ + /* + * Store the VSP pointer and pipe index in the CRTC. If the + * second cell of the 'vsps' specifier isn't present, default + * to 0 to remain compatible with older DT bindings. + */ rcdu->crtcs[i].vsp = &rcdu->vsps[j]; rcdu->crtcs[i].vsp_pipe = cells >= 1 ? 
args.args[0] : 0; } diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c index 3fc7e6899cab..8c6c172bbf2e 100644 --- a/drivers/gpu/drm/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c @@ -16,6 +16,7 @@ #include <linux/of_graph.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/sys_soc.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -842,8 +843,23 @@ static int rcar_lvds_get_clocks(struct rcar_lvds *lvds) return 0; } +static const struct rcar_lvds_device_info rcar_lvds_r8a7790es1_info = { + .gen = 2, + .quirks = RCAR_LVDS_QUIRK_LANES, + .pll_setup = rcar_lvds_pll_setup_gen2, +}; + +static const struct soc_device_attribute lvds_quirk_matches[] = { + { + .soc_id = "r8a7790", .revision = "ES1.*", + .data = &rcar_lvds_r8a7790es1_info, + }, + { /* sentinel */ } +}; + static int rcar_lvds_probe(struct platform_device *pdev) { + const struct soc_device_attribute *attr; struct rcar_lvds *lvds; struct resource *mem; int ret; @@ -857,6 +873,10 @@ static int rcar_lvds_probe(struct platform_device *pdev) lvds->dev = &pdev->dev; lvds->info = of_device_get_match_data(&pdev->dev); + attr = soc_device_match(lvds_quirk_matches); + if (attr) + lvds->info = attr->data; + ret = rcar_lvds_parse_dt(lvds); if (ret < 0) return ret; @@ -893,12 +913,6 @@ static const struct rcar_lvds_device_info rcar_lvds_gen2_info = { .pll_setup = rcar_lvds_pll_setup_gen2, }; -static const struct rcar_lvds_device_info rcar_lvds_r8a7790_info = { - .gen = 2, - .quirks = RCAR_LVDS_QUIRK_LANES, - .pll_setup = rcar_lvds_pll_setup_gen2, -}; - static const struct rcar_lvds_device_info rcar_lvds_gen3_info = { .gen = 3, .quirks = RCAR_LVDS_QUIRK_PWD, @@ -929,8 +943,9 @@ static const struct of_device_id rcar_lvds_of_table[] = { { .compatible = "renesas,r8a7743-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7744-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a774a1-lvds", .data = &rcar_lvds_gen3_info }, + { .compatible = "renesas,r8a774b1-lvds", .data = &rcar_lvds_gen3_info }, { .compatible = "renesas,r8a774c0-lvds", .data = &rcar_lvds_r8a77990_info }, - { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_r8a7790_info }, + { .compatible = "renesas,r8a7790-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7791-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7793-lvds", .data = &rcar_lvds_gen2_info }, { .compatible = "renesas,r8a7795-lvds", .data = &rcar_lvds_gen3_info }, diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index d505ea7d5384..eed594bd38d3 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -477,8 +477,8 @@ static int cdn_dp_disable(struct cdn_dp_device *dp) cdn_dp_set_firmware_active(dp, false); cdn_dp_clk_disable(dp); dp->active = false; - dp->link.rate = 0; - dp->link.num_lanes = 0; + dp->max_lanes = 0; + dp->max_rate = 0; if (!dp->connected) { kfree(dp->edid); dp->edid = NULL; @@ -570,7 +570,7 @@ static bool cdn_dp_check_link_status(struct cdn_dp_device *dp) struct cdn_dp_port *port = cdn_dp_connected_port(dp); u8 sink_lanes = drm_dp_max_lane_count(dp->dpcd); - if (!port || !dp->link.rate || !dp->link.num_lanes) + if (!port || !dp->max_rate || !dp->max_lanes) return false; if (cdn_dp_dpcd_read(dp, DP_LANE0_1_STATUS, link_status, @@ -952,8 +952,8 @@ static void cdn_dp_pd_event_work(struct work_struct *work) /* Enabled and connected with a sink, re-train if 
requested */ } else if (!cdn_dp_check_link_status(dp)) { - unsigned int rate = dp->link.rate; - unsigned int lanes = dp->link.num_lanes; + unsigned int rate = dp->max_rate; + unsigned int lanes = dp->max_lanes; struct drm_display_mode *mode = &dp->mode; DRM_DEV_INFO(dp->dev, "Connected with sink. Re-train link\n"); @@ -966,7 +966,7 @@ static void cdn_dp_pd_event_work(struct work_struct *work) /* If training result is changed, update the video config */ if (mode->clock && - (rate != dp->link.rate || lanes != dp->link.num_lanes)) { + (rate != dp->max_rate || lanes != dp->max_lanes)) { ret = cdn_dp_config_video(dp); if (ret) { dp->connected = false; diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.h b/drivers/gpu/drm/rockchip/cdn-dp-core.h index b85ea89eb60b..83c4586665b4 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.h +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.h @@ -92,9 +92,10 @@ struct cdn_dp_device { struct reset_control *core_rst; struct audio_info audio_info; struct video_info video_info; - struct drm_dp_link link; struct cdn_dp_port *port[MAX_PHY]; u8 ports; + u8 max_lanes; + u8 max_rate; u8 lanes; int active_port; diff --git a/drivers/gpu/drm/rockchip/cdn-dp-reg.c b/drivers/gpu/drm/rockchip/cdn-dp-reg.c index 077c87021908..7361c07cb4a7 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-reg.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-reg.c @@ -535,8 +535,8 @@ static int cdn_dp_get_training_status(struct cdn_dp_device *dp) if (ret) goto err_get_training_status; - dp->link.rate = drm_dp_bw_code_to_link_rate(status[0]); - dp->link.num_lanes = status[1]; + dp->max_rate = drm_dp_bw_code_to_link_rate(status[0]); + dp->max_lanes = status[1]; err_get_training_status: if (ret) @@ -560,8 +560,8 @@ int cdn_dp_train_link(struct cdn_dp_device *dp) return ret; } - DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->link.rate, - dp->link.num_lanes); + DRM_DEV_DEBUG_KMS(dp->dev, "rate:0x%x, lanes:%d\n", dp->max_rate, + dp->max_lanes); return ret; } @@ -639,7 +639,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp) bit_per_pix = (video->color_fmt == YCBCR_4_2_2) ? 
(video->color_depth * 2) : (video->color_depth * 3); - link_rate = dp->link.rate / 1000; + link_rate = dp->max_rate / 1000; ret = cdn_dp_reg_write(dp, BND_HSYNC2VSYNC, VIF_BYPASS_INTERLACE); if (ret) @@ -659,14 +659,13 @@ int cdn_dp_config_video(struct cdn_dp_device *dp) do { tu_size_reg += 2; symbol = tu_size_reg * mode->clock * bit_per_pix; - do_div(symbol, dp->link.num_lanes * link_rate * 8); + do_div(symbol, dp->max_lanes * link_rate * 8); rem = do_div(symbol, 1000); if (tu_size_reg > 64) { ret = -EINVAL; DRM_DEV_ERROR(dp->dev, "tu error, clk:%d, lanes:%d, rate:%d\n", - mode->clock, dp->link.num_lanes, - link_rate); + mode->clock, dp->max_lanes, link_rate); goto err_config_video; } } while ((symbol <= 1) || (tu_size_reg - symbol < 4) || @@ -680,7 +679,7 @@ int cdn_dp_config_video(struct cdn_dp_device *dp) /* set the FIFO Buffer size */ val = div_u64(mode->clock * (symbol + 1), 1000) + link_rate; - val /= (dp->link.num_lanes * link_rate); + val /= (dp->max_lanes * link_rate); val = div_u64(8 * (symbol + 1), bit_per_pix) - val; val += 2; ret = cdn_dp_reg_write(dp, DP_VC_TABLE(15), val); @@ -833,7 +832,7 @@ static void cdn_dp_audio_config_i2s(struct cdn_dp_device *dp, u32 val; if (audio->channels == 2) { - if (dp->link.num_lanes == 1) + if (dp->max_lanes == 1) sub_pckt_num = 2; else sub_pckt_num = 4; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index 906891b03a38..7f56d8c3491d 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -450,6 +450,7 @@ static const struct dw_hdmi_plat_data rk3328_hdmi_drv_data = { .phy_ops = &rk3328_hdmi_phy_ops, .phy_name = "inno_dw_hdmi_phy2", .phy_force_vendor = true, + .use_drm_infoframe = true, }; static struct rockchip_hdmi_chip_data rk3399_chip_data = { @@ -464,6 +465,7 @@ static const struct dw_hdmi_plat_data rk3399_hdmi_drv_data = { .cur_ctr = rockchip_cur_ctr, .phy_config = rockchip_phy_config, .phy_data = &rk3399_chip_data, + .use_drm_infoframe = true, }; static const struct of_device_id dw_hdmi_rockchip_dt_ids[] = { diff --git a/drivers/gpu/drm/rockchip/rk3066_hdmi.c b/drivers/gpu/drm/rockchip/rk3066_hdmi.c index 85fc5f01f761..cdb401f4283d 100644 --- a/drivers/gpu/drm/rockchip/rk3066_hdmi.c +++ b/drivers/gpu/drm/rockchip/rk3066_hdmi.c @@ -743,7 +743,6 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master, struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = data; struct rk3066_hdmi *hdmi; - struct resource *iores; int irq; int ret; @@ -753,12 +752,7 @@ static int rk3066_hdmi_bind(struct device *dev, struct device *master, hdmi->dev = dev; hdmi->drm_dev = drm; - - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!iores) - return -ENXIO; - - hdmi->regs = devm_ioremap_resource(dev, iores); + hdmi->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hdmi->regs)) return PTR_ERR(hdmi->regs); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 291e89b4045f..7582d0e6a60a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -294,7 +294,7 @@ static void rockchip_gem_release_object(struct rockchip_gem_object *rk_obj) kfree(rk_obj); } -struct rockchip_gem_object * +static struct rockchip_gem_object * rockchip_gem_alloc_object(struct drm_device *drm, unsigned int size) { struct rockchip_gem_object *rk_obj; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c 
b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 613404f86668..d04b3492bdac 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -139,6 +139,7 @@ struct vop { uint32_t *regsbak; void __iomem *regs; + void __iomem *lut_regs; /* physical map length of vop register */ uint32_t len; @@ -1040,14 +1041,118 @@ static bool vop_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *adjusted_mode) { struct vop *vop = to_vop(crtc); + unsigned long rate; - adjusted_mode->clock = - DIV_ROUND_UP(clk_round_rate(vop->dclk, - adjusted_mode->clock * 1000), 1000); + /* + * Clock craziness. + * + * Key points: + * + * - DRM works in in kHz. + * - Clock framework works in Hz. + * - Rockchip's clock driver picks the clock rate that is the + * same _OR LOWER_ than the one requested. + * + * Action plan: + * + * 1. When DRM gives us a mode, we should add 999 Hz to it. That way + * if the clock we need is 60000001 Hz (~60 MHz) and DRM tells us to + * make 60000 kHz then the clock framework will actually give us + * the right clock. + * + * NOTE: if the PLL (maybe through a divider) could actually make + * a clock rate 999 Hz higher instead of the one we want then this + * could be a problem. Unfortunately there's not much we can do + * since it's baked into DRM to use kHz. It shouldn't matter in + * practice since Rockchip PLLs are controlled by tables and + * even if there is a divider in the middle I wouldn't expect PLL + * rates in the table that are just a few kHz different. + * + * 2. Get the clock framework to round the rate for us to tell us + * what it will actually make. + * + * 3. Store the rounded up rate so that we don't need to worry about + * this in the actual clk_set_rate(). + */ + rate = clk_round_rate(vop->dclk, adjusted_mode->clock * 1000 + 999); + adjusted_mode->clock = DIV_ROUND_UP(rate, 1000); return true; } +static bool vop_dsp_lut_is_enabled(struct vop *vop) +{ + return vop_read_reg(vop, 0, &vop->data->common->dsp_lut_en); +} + +static void vop_crtc_write_gamma_lut(struct vop *vop, struct drm_crtc *crtc) +{ + struct drm_color_lut *lut = crtc->state->gamma_lut->data; + unsigned int i; + + for (i = 0; i < crtc->gamma_size; i++) { + u32 word; + + word = (drm_color_lut_extract(lut[i].red, 10) << 20) | + (drm_color_lut_extract(lut[i].green, 10) << 10) | + drm_color_lut_extract(lut[i].blue, 10); + writel(word, vop->lut_regs + i * 4); + } +} + +static void vop_crtc_gamma_set(struct vop *vop, struct drm_crtc *crtc, + struct drm_crtc_state *old_state) +{ + struct drm_crtc_state *state = crtc->state; + unsigned int idle; + int ret; + + if (!vop->lut_regs) + return; + /* + * To disable gamma (gamma_lut is null) or to write + * an update to the LUT, clear dsp_lut_en. + */ + spin_lock(&vop->reg_lock); + VOP_REG_SET(vop, common, dsp_lut_en, 0); + vop_cfg_done(vop); + spin_unlock(&vop->reg_lock); + + /* + * In order to write the LUT to the internal memory, + * we need to first make sure the dsp_lut_en bit is cleared. 
+ */ + ret = readx_poll_timeout(vop_dsp_lut_is_enabled, vop, + idle, !idle, 5, 30 * 1000); + if (ret) { + DRM_DEV_ERROR(vop->dev, "display LUT RAM enable timeout!\n"); + return; + } + + if (!state->gamma_lut) + return; + + spin_lock(&vop->reg_lock); + vop_crtc_write_gamma_lut(vop, crtc); + VOP_REG_SET(vop, common, dsp_lut_en, 1); + vop_cfg_done(vop); + spin_unlock(&vop->reg_lock); +} + +static void vop_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state) +{ + struct vop *vop = to_vop(crtc); + + /* + * Only update GAMMA if the 'active' flag is not changed, + * otherwise it's updated by .atomic_enable. + */ + if (crtc->state->color_mgmt_changed && + !crtc->state->active_changed) + vop_crtc_gamma_set(vop, crtc, old_crtc_state); +} + static void vop_crtc_atomic_enable(struct drm_crtc *crtc, struct drm_crtc_state *old_state) { @@ -1075,6 +1180,14 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc, return; } + /* + * If we have a GAMMA LUT in the state, then let's make sure + * it's updated. We might be coming out of suspend, + * which means the LUT internal memory needs to be re-written. + */ + if (crtc->state->gamma_lut) + vop_crtc_gamma_set(vop, crtc, old_state); + mutex_lock(&vop->vop_lock); WARN_ON(vop->event); @@ -1085,9 +1198,7 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc, DRM_DEV_ERROR(vop->dev, "Failed to enable vop (%d)\n", ret); return; } - - pin_pol = BIT(DCLK_INVERT); - pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ? + pin_pol = (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) ? BIT(HSYNC_POSITIVE) : 0; pin_pol |= (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) ? BIT(VSYNC_POSITIVE) : 0; @@ -1096,25 +1207,29 @@ static void vop_crtc_atomic_enable(struct drm_crtc *crtc, switch (s->output_type) { case DRM_MODE_CONNECTOR_LVDS: - VOP_REG_SET(vop, output, rgb_en, 1); + VOP_REG_SET(vop, output, rgb_dclk_pol, 1); VOP_REG_SET(vop, output, rgb_pin_pol, pin_pol); + VOP_REG_SET(vop, output, rgb_en, 1); break; case DRM_MODE_CONNECTOR_eDP: + VOP_REG_SET(vop, output, edp_dclk_pol, 1); VOP_REG_SET(vop, output, edp_pin_pol, pin_pol); VOP_REG_SET(vop, output, edp_en, 1); break; case DRM_MODE_CONNECTOR_HDMIA: + VOP_REG_SET(vop, output, hdmi_dclk_pol, 1); VOP_REG_SET(vop, output, hdmi_pin_pol, pin_pol); VOP_REG_SET(vop, output, hdmi_en, 1); break; case DRM_MODE_CONNECTOR_DSI: + VOP_REG_SET(vop, output, mipi_dclk_pol, 1); VOP_REG_SET(vop, output, mipi_pin_pol, pin_pol); VOP_REG_SET(vop, output, mipi_en, 1); VOP_REG_SET(vop, output, mipi_dual_channel_en, !!(s->output_flags & ROCKCHIP_OUTPUT_DSI_DUAL)); break; case DRM_MODE_CONNECTOR_DisplayPort: - pin_pol &= ~BIT(DCLK_INVERT); + VOP_REG_SET(vop, output, dp_dclk_pol, 0); VOP_REG_SET(vop, output, dp_pin_pol, pin_pol); VOP_REG_SET(vop, output, dp_en, 1); break; @@ -1191,6 +1306,26 @@ static void vop_wait_for_irq_handler(struct vop *vop) synchronize_irq(vop->irq); } +static int vop_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_crtc_state *crtc_state) +{ + struct vop *vop = to_vop(crtc); + + if (vop->lut_regs && crtc_state->color_mgmt_changed && + crtc_state->gamma_lut) { + unsigned int len; + + len = drm_color_lut_size(crtc_state->gamma_lut); + if (len != crtc->gamma_size) { + DRM_DEBUG_KMS("Invalid LUT size; got %d, expected %d\n", + len, crtc->gamma_size); + return -EINVAL; + } + } + + return 0; +} + static void vop_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_crtc_state *old_crtc_state) { @@ -1243,6 +1378,8 @@ static void vop_crtc_atomic_flush(struct drm_crtc *crtc, static const 
struct drm_crtc_helper_funcs vop_crtc_helper_funcs = { .mode_fixup = vop_crtc_mode_fixup, + .atomic_check = vop_crtc_atomic_check, + .atomic_begin = vop_crtc_atomic_begin, .atomic_flush = vop_crtc_atomic_flush, .atomic_enable = vop_crtc_atomic_enable, .atomic_disable = vop_crtc_atomic_disable, @@ -1361,6 +1498,7 @@ static const struct drm_crtc_funcs vop_crtc_funcs = { .disable_vblank = vop_crtc_disable_vblank, .set_crc_source = vop_crtc_set_crc_source, .verify_crc_source = vop_crtc_verify_crc_source, + .gamma_set = drm_atomic_helper_legacy_gamma_set, }; static void vop_fb_unref_worker(struct drm_flip_work *work, void *val) @@ -1518,6 +1656,10 @@ static int vop_create_crtc(struct vop *vop) goto err_cleanup_planes; drm_crtc_helper_add(crtc, &vop_crtc_helper_funcs); + if (vop->lut_regs) { + drm_mode_crtc_set_gamma_size(crtc, vop_data->lut_size); + drm_crtc_enable_color_mgmt(crtc, 0, false, vop_data->lut_size); + } /* * Create drm_planes for overlay windows with possible_crtcs restricted @@ -1822,6 +1964,17 @@ static int vop_bind(struct device *dev, struct device *master, void *data) if (IS_ERR(vop->regs)) return PTR_ERR(vop->regs); + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res) { + if (!vop_data->lut_size) { + DRM_DEV_ERROR(dev, "no gamma LUT size defined\n"); + return -EINVAL; + } + vop->lut_regs = devm_ioremap_resource(dev, res); + if (IS_ERR(vop->lut_regs)) + return PTR_ERR(vop->lut_regs); + } + vop->regsbak = devm_kzalloc(dev, vop->len, GFP_KERNEL); if (!vop->regsbak) return -ENOMEM; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h index 2149a889c29d..0b3d18c457b2 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h @@ -46,10 +46,15 @@ struct vop_modeset { struct vop_output { struct vop_reg pin_pol; struct vop_reg dp_pin_pol; + struct vop_reg dp_dclk_pol; struct vop_reg edp_pin_pol; + struct vop_reg edp_dclk_pol; struct vop_reg hdmi_pin_pol; + struct vop_reg hdmi_dclk_pol; struct vop_reg mipi_pin_pol; + struct vop_reg mipi_dclk_pol; struct vop_reg rgb_pin_pol; + struct vop_reg rgb_dclk_pol; struct vop_reg dp_en; struct vop_reg edp_en; struct vop_reg hdmi_en; @@ -67,6 +72,7 @@ struct vop_common { struct vop_reg dither_down_mode; struct vop_reg dither_down_en; struct vop_reg dither_up; + struct vop_reg dsp_lut_en; struct vop_reg gate_en; struct vop_reg mmu_en; struct vop_reg out_mode; @@ -170,6 +176,7 @@ struct vop_data { const struct vop_win_yuv2yuv_data *win_yuv2yuv; const struct vop_win_data *win; unsigned int win_size; + unsigned int lut_size; #define VOP_FEATURE_OUTPUT_RGB10 BIT(0) #define VOP_FEATURE_INTERNAL_RGB BIT(1) @@ -294,8 +301,7 @@ enum dither_down_mode_sel { enum vop_pol { HSYNC_POSITIVE = 0, VSYNC_POSITIVE = 1, - DEN_NEGATIVE = 2, - DCLK_INVERT = 3 + DEN_NEGATIVE = 2 }; #define FRAC_16_16(mult, div) (((mult) << 16) / (div)) diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c index 64aefa856896..8a4c9af0ba73 100644 --- a/drivers/gpu/drm/rockchip/rockchip_lvds.c +++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c @@ -16,6 +16,7 @@ #include <linux/regmap.h> #include <linux/reset.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_dp_helper.h> #include <drm/drm_of.h> diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c index 89e0bb0fe0ab..ae730275a34f 100644 --- a/drivers/gpu/drm/rockchip/rockchip_rgb.c +++ 
b/drivers/gpu/drm/rockchip/rockchip_rgb.c @@ -9,6 +9,7 @@ #include <linux/of_graph.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_dp_helper.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> @@ -135,7 +136,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev, drm_encoder_helper_add(encoder, &rockchip_rgb_encoder_helper_funcs); if (panel) { - bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS); + bridge = drm_panel_bridge_add_typed(panel, + DRM_MODE_CONNECTOR_LVDS); if (IS_ERR(bridge)) return ERR_CAST(bridge); } diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c index d1494be14471..7a9d979c8d5d 100644 --- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c +++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c @@ -16,6 +16,7 @@ #include "rockchip_drm_vop.h" #include "rockchip_vop_reg.h" +#include "rockchip_drm_drv.h" #define _VOP_REG(off, _mask, _shift, _write_mask, _relaxed) \ { \ @@ -214,9 +215,11 @@ static const struct vop_modeset px30_modeset = { }; static const struct vop_output px30_output = { - .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 1), - .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 25), + .rgb_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 1), + .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 2), .rgb_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 0), + .mipi_dclk_pol = VOP_REG(PX30_DSP_CTRL0, 0x1, 25), + .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0x7, 26), .mipi_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 24), }; @@ -598,6 +601,7 @@ static const struct vop_common rk3288_common = { .dither_down_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 2), .pre_dither_down = VOP_REG(RK3288_DSP_CTRL1, 0x1, 1), .dither_up = VOP_REG(RK3288_DSP_CTRL1, 0x1, 6), + .dsp_lut_en = VOP_REG(RK3288_DSP_CTRL1, 0x1, 0), .data_blank = VOP_REG(RK3288_DSP_CTRL0, 0x1, 19), .dsp_blank = VOP_REG(RK3288_DSP_CTRL0, 0x3, 18), .out_mode = VOP_REG(RK3288_DSP_CTRL0, 0xf, 0), @@ -646,6 +650,7 @@ static const struct vop_data rk3288_vop = { .output = &rk3288_output, .win = rk3288_vop_win_data, .win_size = ARRAY_SIZE(rk3288_vop_win_data), + .lut_size = 1024, }; static const int rk3368_vop_intrs[] = { @@ -717,10 +722,14 @@ static const struct vop_win_data rk3368_vop_win_data[] = { }; static const struct vop_output rk3368_output = { - .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16), - .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20), - .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24), - .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28), + .rgb_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 19), + .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23), + .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27), + .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31), + .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16), + .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20), + .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24), + .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28), .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12), .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13), .edp_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 14), @@ -764,11 +773,16 @@ static const struct vop_data rk3366_vop = { }; static const struct vop_output rk3399_output = { - .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0xf, 16), - .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 16), - .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 20), - .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 24), - .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0xf, 28), + .dp_dclk_pol = VOP_REG(RK3399_DSP_CTRL1, 0x1, 19), + .rgb_dclk_pol = 
VOP_REG(RK3368_DSP_CTRL1, 0x1, 19), + .hdmi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 23), + .edp_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 27), + .mipi_dclk_pol = VOP_REG(RK3368_DSP_CTRL1, 0x1, 31), + .dp_pin_pol = VOP_REG(RK3399_DSP_CTRL1, 0x7, 16), + .rgb_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 16), + .hdmi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 20), + .edp_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 24), + .mipi_pin_pol = VOP_REG(RK3368_DSP_CTRL1, 0x7, 28), .dp_en = VOP_REG(RK3399_SYS_CTRL, 0x1, 11), .rgb_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 12), .hdmi_en = VOP_REG(RK3288_SYS_CTRL, 0x1, 13), @@ -872,14 +886,18 @@ static const struct vop_modeset rk3328_modeset = { }; static const struct vop_output rk3328_output = { + .rgb_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 19), + .hdmi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 23), + .edp_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 27), + .mipi_dclk_pol = VOP_REG(RK3328_DSP_CTRL1, 0x1, 31), .rgb_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 12), .hdmi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 13), .edp_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 14), .mipi_en = VOP_REG(RK3328_SYS_CTRL, 0x1, 15), - .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 16), - .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 20), - .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 24), - .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0xf, 28), + .rgb_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 16), + .hdmi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 20), + .edp_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 24), + .mipi_pin_pol = VOP_REG(RK3328_DSP_CTRL1, 0x7, 28), }; static const struct vop_misc rk3328_misc = { diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 1a5153197fe9..461a7a8129f4 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -23,6 +23,7 @@ #include <linux/kthread.h> #include <linux/slab.h> +#include <linux/completion.h> #include <drm/drm_print.h> #include <drm/gpu_scheduler.h> @@ -68,6 +69,8 @@ int drm_sched_entity_init(struct drm_sched_entity *entity, if (!entity->rq_list) return -ENOMEM; + init_completion(&entity->entity_idle); + for (i = 0; i < num_rq_list; ++i) entity->rq_list[i] = rq_list[i]; @@ -286,11 +289,12 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity) */ if (spsc_queue_count(&entity->job_queue)) { if (sched) { - /* Park the kernel for a moment to make sure it isn't processing - * our enity. + /* + * Wait for thread to idle to make sure it isn't processing + * this entity. 
*/ - kthread_park(sched->thread); - kthread_unpark(sched->thread); + wait_for_completion(&entity->entity_idle); + } if (entity->dependency) { dma_fence_remove_callback(entity->dependency, diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c index 54977408f574..8b45c3a1b84e 100644 --- a/drivers/gpu/drm/scheduler/sched_fence.c +++ b/drivers/gpu/drm/scheduler/sched_fence.c @@ -128,13 +128,13 @@ static void drm_sched_fence_release_finished(struct dma_fence *f) dma_fence_put(&fence->scheduled); } -const struct dma_fence_ops drm_sched_fence_ops_scheduled = { +static const struct dma_fence_ops drm_sched_fence_ops_scheduled = { .get_driver_name = drm_sched_fence_get_driver_name, .get_timeline_name = drm_sched_fence_get_timeline_name, .release = drm_sched_fence_release_scheduled, }; -const struct dma_fence_ops drm_sched_fence_ops_finished = { +static const struct dma_fence_ops drm_sched_fence_ops_finished = { .get_driver_name = drm_sched_fence_get_driver_name, .get_timeline_name = drm_sched_fence_get_timeline_name, .release = drm_sched_fence_release_finished, diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index f39b97ed4ade..3c57e84222ca 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -47,6 +47,7 @@ #include <linux/kthread.h> #include <linux/wait.h> #include <linux/sched.h> +#include <linux/completion.h> #include <uapi/linux/sched/types.h> #include <drm/drm_print.h> @@ -134,6 +135,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) list_for_each_entry_continue(entity, &rq->entities, list) { if (drm_sched_entity_is_ready(entity)) { rq->current_entity = entity; + reinit_completion(&entity->entity_idle); spin_unlock(&rq->lock); return entity; } @@ -144,6 +146,7 @@ drm_sched_rq_select_entity(struct drm_sched_rq *rq) if (drm_sched_entity_is_ready(entity)) { rq->current_entity = entity; + reinit_completion(&entity->entity_idle); spin_unlock(&rq->lock); return entity; } @@ -496,8 +499,10 @@ void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched) fence = sched->ops->run_job(s_job); if (IS_ERR_OR_NULL(fence)) { + if (IS_ERR(fence)) + dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); + s_job->s_fence->parent = NULL; - dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); } else { s_job->s_fence->parent = fence; } @@ -632,43 +637,45 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb) } /** - * drm_sched_cleanup_jobs - destroy finished jobs + * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed * * @sched: scheduler instance * - * Remove all finished jobs from the mirror list and destroy them. + * Returns the next finished job from the mirror list (if there is one) + * ready for it to be destroyed. 
*/ -static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched) +static struct drm_sched_job * +drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched) { + struct drm_sched_job *job; unsigned long flags; - /* Don't destroy jobs while the timeout worker is running */ - if (sched->timeout != MAX_SCHEDULE_TIMEOUT && - !cancel_delayed_work(&sched->work_tdr)) - return; - + /* + * Don't destroy jobs while the timeout worker is running OR thread + * is being parked and hence assumed to not touch ring_mirror_list + */ + if ((sched->timeout != MAX_SCHEDULE_TIMEOUT && + !cancel_delayed_work(&sched->work_tdr)) || + __kthread_should_park(sched->thread)) + return NULL; - while (!list_empty(&sched->ring_mirror_list)) { - struct drm_sched_job *job; + spin_lock_irqsave(&sched->job_list_lock, flags); - job = list_first_entry(&sched->ring_mirror_list, + job = list_first_entry_or_null(&sched->ring_mirror_list, struct drm_sched_job, node); - if (!dma_fence_is_signaled(&job->s_fence->finished)) - break; - spin_lock_irqsave(&sched->job_list_lock, flags); + if (job && dma_fence_is_signaled(&job->s_fence->finished)) { /* remove job from ring_mirror_list */ list_del_init(&job->node); - spin_unlock_irqrestore(&sched->job_list_lock, flags); - - sched->ops->free_job(job); + } else { + job = NULL; + /* queue timeout for next job */ + drm_sched_start_timeout(sched); } - /* queue timeout for next job */ - spin_lock_irqsave(&sched->job_list_lock, flags); - drm_sched_start_timeout(sched); spin_unlock_irqrestore(&sched->job_list_lock, flags); + return job; } /** @@ -708,17 +715,27 @@ static int drm_sched_main(void *param) struct drm_sched_fence *s_fence; struct drm_sched_job *sched_job; struct dma_fence *fence; + struct drm_sched_job *cleanup_job = NULL; wait_event_interruptible(sched->wake_up_worker, - (drm_sched_cleanup_jobs(sched), + (cleanup_job = drm_sched_get_cleanup_job(sched)) || (!drm_sched_blocked(sched) && (entity = drm_sched_select_entity(sched))) || - kthread_should_stop())); + kthread_should_stop()); + + if (cleanup_job) { + sched->ops->free_job(cleanup_job); + /* queue timeout for next job */ + drm_sched_start_timeout(sched); + } if (!entity) continue; sched_job = drm_sched_entity_pop_job(entity); + + complete(&entity->entity_idle); + if (!sched_job) continue; @@ -741,8 +758,9 @@ static int drm_sched_main(void *param) r); dma_fence_put(fence); } else { + if (IS_ERR(fence)) + dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); - dma_fence_set_error(&s_fence->finished, PTR_ERR(fence)); drm_sched_process_job(NULL, &sched_job->cb); } diff --git a/drivers/gpu/drm/selftests/Makefile b/drivers/gpu/drm/selftests/Makefile index aae88f8a016c..d2137342b371 100644 --- a/drivers/gpu/drm/selftests/Makefile +++ b/drivers/gpu/drm/selftests/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only test-drm_modeset-y := test-drm_modeset_common.o test-drm_plane_helper.o \ test-drm_format.o test-drm_framebuffer.o \ - test-drm_damage_helper.o + test-drm_damage_helper.o test-drm_dp_mst_helper.o obj-$(CONFIG_DRM_DEBUG_SELFTEST) += test-drm_mm.o test-drm_modeset.o test-drm_cmdline_parser.o diff --git a/drivers/gpu/drm/selftests/drm_modeset_selftests.h b/drivers/gpu/drm/selftests/drm_modeset_selftests.h index 464753746013..1898de0b4a4d 100644 --- a/drivers/gpu/drm/selftests/drm_modeset_selftests.h +++ b/drivers/gpu/drm/selftests/drm_modeset_selftests.h @@ -32,3 +32,5 @@ selftest(damage_iter_damage_one_intersect, igt_damage_iter_damage_one_intersect) selftest(damage_iter_damage_one_outside, 
igt_damage_iter_damage_one_outside) selftest(damage_iter_damage_src_moved, igt_damage_iter_damage_src_moved) selftest(damage_iter_damage_not_visible, igt_damage_iter_damage_not_visible) +selftest(dp_mst_calc_pbn_mode, igt_dp_mst_calc_pbn_mode) +selftest(dp_mst_sideband_msg_req_decode, igt_dp_mst_sideband_msg_req_decode) diff --git a/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c new file mode 100644 index 000000000000..af2b2de65316 --- /dev/null +++ b/drivers/gpu/drm/selftests/test-drm_dp_mst_helper.c @@ -0,0 +1,238 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Test cases for for the DRM DP MST helpers + */ + +#define PREFIX_STR "[drm_dp_mst_helper]" + +#include <drm/drm_dp_mst_helper.h> +#include <drm/drm_print.h> + +#include "../drm_dp_mst_topology_internal.h" +#include "test-drm_modeset_common.h" + +int igt_dp_mst_calc_pbn_mode(void *ignored) +{ + int pbn, i; + const struct { + int rate; + int bpp; + int expected; + } test_params[] = { + { 154000, 30, 689 }, + { 234000, 30, 1047 }, + { 297000, 24, 1063 }, + }; + + for (i = 0; i < ARRAY_SIZE(test_params); i++) { + pbn = drm_dp_calc_pbn_mode(test_params[i].rate, + test_params[i].bpp); + FAIL(pbn != test_params[i].expected, + "Expected PBN %d for clock %d bpp %d, got %d\n", + test_params[i].expected, test_params[i].rate, + test_params[i].bpp, pbn); + } + + return 0; +} + +static bool +sideband_msg_req_equal(const struct drm_dp_sideband_msg_req_body *in, + const struct drm_dp_sideband_msg_req_body *out) +{ + const struct drm_dp_remote_i2c_read_tx *txin, *txout; + int i; + + if (in->req_type != out->req_type) + return false; + + switch (in->req_type) { + /* + * Compare struct members manually for request types which can't be + * compared simply using memcmp(). 
This is because said request types + * contain pointers to other allocated structs + */ + case DP_REMOTE_I2C_READ: +#define IN in->u.i2c_read +#define OUT out->u.i2c_read + if (IN.num_bytes_read != OUT.num_bytes_read || + IN.num_transactions != OUT.num_transactions || + IN.port_number != OUT.port_number || + IN.read_i2c_device_id != OUT.read_i2c_device_id) + return false; + + for (i = 0; i < IN.num_transactions; i++) { + txin = &IN.transactions[i]; + txout = &OUT.transactions[i]; + + if (txin->i2c_dev_id != txout->i2c_dev_id || + txin->no_stop_bit != txout->no_stop_bit || + txin->num_bytes != txout->num_bytes || + txin->i2c_transaction_delay != + txout->i2c_transaction_delay) + return false; + + if (memcmp(txin->bytes, txout->bytes, + txin->num_bytes) != 0) + return false; + } + break; +#undef IN +#undef OUT + + case DP_REMOTE_DPCD_WRITE: +#define IN in->u.dpcd_write +#define OUT out->u.dpcd_write + if (IN.dpcd_address != OUT.dpcd_address || + IN.num_bytes != OUT.num_bytes || + IN.port_number != OUT.port_number) + return false; + + return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0; +#undef IN +#undef OUT + + case DP_REMOTE_I2C_WRITE: +#define IN in->u.i2c_write +#define OUT out->u.i2c_write + if (IN.port_number != OUT.port_number || + IN.write_i2c_device_id != OUT.write_i2c_device_id || + IN.num_bytes != OUT.num_bytes) + return false; + + return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0; +#undef IN +#undef OUT + + default: + return memcmp(in, out, sizeof(*in)) == 0; + } + + return true; +} + +static bool +sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in) +{ + struct drm_dp_sideband_msg_req_body out = {0}; + struct drm_printer p = drm_err_printer(PREFIX_STR); + struct drm_dp_sideband_msg_tx txmsg; + int i, ret; + + drm_dp_encode_sideband_req(in, &txmsg); + ret = drm_dp_decode_sideband_req(&txmsg, &out); + if (ret < 0) { + drm_printf(&p, "Failed to decode sideband request: %d\n", + ret); + return false; + } + + if (!sideband_msg_req_equal(in, &out)) { + drm_printf(&p, "Encode/decode failed, expected:\n"); + drm_dp_dump_sideband_msg_req_body(in, 1, &p); + drm_printf(&p, "Got:\n"); + drm_dp_dump_sideband_msg_req_body(&out, 1, &p); + return false; + } + + switch (in->req_type) { + case DP_REMOTE_DPCD_WRITE: + kfree(out.u.dpcd_write.bytes); + break; + case DP_REMOTE_I2C_READ: + for (i = 0; i < out.u.i2c_read.num_transactions; i++) + kfree(out.u.i2c_read.transactions[i].bytes); + break; + case DP_REMOTE_I2C_WRITE: + kfree(out.u.i2c_write.bytes); + break; + } + + /* Clear everything but the req_type for the input */ + memset(&in->u, 0, sizeof(in->u)); + + return true; +} + +int igt_dp_mst_sideband_msg_req_decode(void *unused) +{ + struct drm_dp_sideband_msg_req_body in = { 0 }; + u8 data[] = { 0xff, 0x0, 0xdd }; + int i; + +#define DO_TEST() FAIL_ON(!sideband_msg_req_encode_decode(&in)) + + in.req_type = DP_ENUM_PATH_RESOURCES; + in.u.port_num.port_number = 5; + DO_TEST(); + + in.req_type = DP_POWER_UP_PHY; + in.u.port_num.port_number = 5; + DO_TEST(); + + in.req_type = DP_POWER_DOWN_PHY; + in.u.port_num.port_number = 5; + DO_TEST(); + + in.req_type = DP_ALLOCATE_PAYLOAD; + in.u.allocate_payload.number_sdp_streams = 3; + for (i = 0; i < in.u.allocate_payload.number_sdp_streams; i++) + in.u.allocate_payload.sdp_stream_sink[i] = i + 1; + DO_TEST(); + in.u.allocate_payload.port_number = 0xf; + DO_TEST(); + in.u.allocate_payload.vcpi = 0x7f; + DO_TEST(); + in.u.allocate_payload.pbn = U16_MAX; + DO_TEST(); + + in.req_type = DP_QUERY_PAYLOAD; + 
in.u.query_payload.port_number = 0xf; + DO_TEST(); + in.u.query_payload.vcpi = 0x7f; + DO_TEST(); + + in.req_type = DP_REMOTE_DPCD_READ; + in.u.dpcd_read.port_number = 0xf; + DO_TEST(); + in.u.dpcd_read.dpcd_address = 0xfedcb; + DO_TEST(); + in.u.dpcd_read.num_bytes = U8_MAX; + DO_TEST(); + + in.req_type = DP_REMOTE_DPCD_WRITE; + in.u.dpcd_write.port_number = 0xf; + DO_TEST(); + in.u.dpcd_write.dpcd_address = 0xfedcb; + DO_TEST(); + in.u.dpcd_write.num_bytes = ARRAY_SIZE(data); + in.u.dpcd_write.bytes = data; + DO_TEST(); + + in.req_type = DP_REMOTE_I2C_READ; + in.u.i2c_read.port_number = 0xf; + DO_TEST(); + in.u.i2c_read.read_i2c_device_id = 0x7f; + DO_TEST(); + in.u.i2c_read.num_transactions = 3; + in.u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3; + for (i = 0; i < in.u.i2c_read.num_transactions; i++) { + in.u.i2c_read.transactions[i].bytes = data; + in.u.i2c_read.transactions[i].num_bytes = ARRAY_SIZE(data); + in.u.i2c_read.transactions[i].i2c_dev_id = 0x7f & ~i; + in.u.i2c_read.transactions[i].i2c_transaction_delay = 0xf & ~i; + } + DO_TEST(); + + in.req_type = DP_REMOTE_I2C_WRITE; + in.u.i2c_write.port_number = 0xf; + DO_TEST(); + in.u.i2c_write.write_i2c_device_id = 0x7f; + DO_TEST(); + in.u.i2c_write.num_bytes = ARRAY_SIZE(data); + in.u.i2c_write.bytes = data; + DO_TEST(); + +#undef DO_TEST + return 0; +} diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c index 74d5561a862b..2d29ea6f92e2 100644 --- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c +++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c @@ -126,7 +126,7 @@ static struct drm_framebuffer_test createbuffer_tests[] = { .handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH - 1, 0 }, } }, -{ .buffer_created = 0, .name = "NV12 Invalid modifier/misssing DRM_MODE_FB_MODIFIERS flag", +{ .buffer_created = 0, .name = "NV12 Invalid modifier/missing DRM_MODE_FB_MODIFIERS flag", .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12, .handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH, 0 }, diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c index 388f9844f4ba..9aabe82dcd3a 100644 --- a/drivers/gpu/drm/selftests/test-drm_mm.c +++ b/drivers/gpu/drm/selftests/test-drm_mm.c @@ -854,7 +854,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm, if (start > 0) { node = __drm_mm_interval_first(mm, 0, start - 1); - if (node->allocated) { + if (drm_mm_node_allocated(node)) { pr_err("node before start: node=%llx+%llu, start=%llx\n", node->start, node->size, start); return false; @@ -863,7 +863,7 @@ static bool assert_contiguous_in_range(struct drm_mm *mm, if (end < U64_MAX) { node = __drm_mm_interval_first(mm, end, U64_MAX); - if (node->allocated) { + if (drm_mm_node_allocated(node)) { pr_err("node after end: node=%llx+%llu, end=%llx\n", node->start, node->size, end); return false; @@ -1156,12 +1156,12 @@ static void show_holes(const struct drm_mm *mm, int count) struct drm_mm_node *next = list_next_entry(hole, node_list); const char *node1 = NULL, *node2 = NULL; - if (hole->allocated) + if (drm_mm_node_allocated(hole)) node1 = kasprintf(GFP_KERNEL, "[%llx + %lld, color=%ld], ", hole->start, hole->size, hole->color); - if (next->allocated) + if (drm_mm_node_allocated(next)) node2 = kasprintf(GFP_KERNEL, ", [%llx + %lld, color=%ld]", next->start, next->size, next->color); @@ -1900,18 +1900,18 @@ static void 
separate_adjacent_colors(const struct drm_mm_node *node, u64 *start, u64 *end) { - if (node->allocated && node->color != color) + if (drm_mm_node_allocated(node) && node->color != color) ++*start; node = list_next_entry(node, node_list); - if (node->allocated && node->color != color) + if (drm_mm_node_allocated(node) && node->color != color) --*end; } static bool colors_abutt(const struct drm_mm_node *node) { if (!drm_mm_hole_follows(node) && - list_next_entry(node, node_list)->allocated) { + drm_mm_node_allocated(list_next_entry(node, node_list))) { pr_err("colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n", node->color, node->start, node->size, list_next_entry(node, node_list)->color, diff --git a/drivers/gpu/drm/selftests/test-drm_modeset_common.h b/drivers/gpu/drm/selftests/test-drm_modeset_common.h index 8c76f09c12d1..0fcb8bbc6a1b 100644 --- a/drivers/gpu/drm/selftests/test-drm_modeset_common.h +++ b/drivers/gpu/drm/selftests/test-drm_modeset_common.h @@ -39,5 +39,7 @@ int igt_damage_iter_damage_one_intersect(void *ignored); int igt_damage_iter_damage_one_outside(void *ignored); int igt_damage_iter_damage_src_moved(void *ignored); int igt_damage_iter_damage_not_visible(void *ignored); +int igt_dp_mst_calc_pbn_mode(void *ignored); +int igt_dp_mst_sideband_msg_req_decode(void *ignored); #endif diff --git a/drivers/gpu/drm/sti/sti_cursor.c b/drivers/gpu/drm/sti/sti_cursor.c index 0bf7c332cf0b..ea64c1dcaf63 100644 --- a/drivers/gpu/drm/sti/sti_cursor.c +++ b/drivers/gpu/drm/sti/sti_cursor.c @@ -47,7 +47,7 @@ struct dma_pixmap { void *base; }; -/** +/* * STI Cursor structure * * @sti_plane: sti_plane structure diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index e55870190bf5..68289b0b063a 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -12,6 +12,7 @@ #include <linux/platform_device.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_device.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> @@ -65,7 +66,7 @@ static struct dvo_config rgb_24bit_de_cfg = { .awg_fwgen_fct = sti_awg_generate_code_data_enable_mode, }; -/** +/* * STI digital video output structure * * @dev: driver device diff --git a/drivers/gpu/drm/sti/sti_gdp.c b/drivers/gpu/drm/sti/sti_gdp.c index 8e926cd6a1c8..11595c748844 100644 --- a/drivers/gpu/drm/sti/sti_gdp.c +++ b/drivers/gpu/drm/sti/sti_gdp.c @@ -103,7 +103,7 @@ struct sti_gdp_node_list { dma_addr_t btm_field_paddr; }; -/** +/* * STI GDP structure * * @sti_plane: sti_plane structure diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c index 94e404f13234..8f7bf33815fd 100644 --- a/drivers/gpu/drm/sti/sti_hda.c +++ b/drivers/gpu/drm/sti/sti_hda.c @@ -12,6 +12,7 @@ #include <linux/seq_file.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_debugfs.h> #include <drm/drm_device.h> #include <drm/drm_file.h> @@ -230,7 +231,7 @@ static const struct sti_hda_video_config hda_supported_modes[] = { AWGi_720x480p_60, NN_720x480p_60, VID_ED} }; -/** +/* * STI hd analog structure * * @dev: driver device diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 9862c322f0c4..814560ead4e1 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -9,11 +9,12 @@ #include <linux/debugfs.h> #include <linux/hdmi.h> #include <linux/module.h> -#include <linux/of_gpio.h> +#include <linux/io.h> #include <linux/platform_device.h> #include <linux/reset.h> #include 
<drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> #include <drm/drm_edid.h> @@ -333,7 +334,6 @@ static void hdmi_infoframe_reset(struct sti_hdmi *hdmi, * Helper to concatenate infoframe in 32 bits word * * @ptr: pointer on the hdmi internal structure - * @data: infoframe to write * @size: size to write */ static inline unsigned int hdmi_infoframe_subpack(const u8 *ptr, size_t size) @@ -543,13 +543,14 @@ static int hdmi_vendor_infoframe_config(struct sti_hdmi *hdmi) return 0; } +#define HDMI_TIMEOUT_SWRESET 100 /*milliseconds */ + /** * Software reset of the hdmi subsystem * * @hdmi: pointer on the hdmi internal structure * */ -#define HDMI_TIMEOUT_SWRESET 100 /*milliseconds */ static void hdmi_swreset(struct sti_hdmi *hdmi) { u32 val; @@ -1256,6 +1257,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) struct drm_device *drm_dev = data; struct drm_encoder *encoder; struct sti_hdmi_connector *connector; + struct cec_connector_info conn_info; struct drm_connector *drm_connector; struct drm_bridge *bridge; int err; @@ -1318,6 +1320,14 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) goto err_sysfs; } + cec_fill_conn_info_from_drm(&conn_info, drm_connector); + hdmi->notifier = cec_notifier_conn_register(&hdmi->dev, NULL, + &conn_info); + if (!hdmi->notifier) { + hdmi->drm_connector = NULL; + return -ENOMEM; + } + /* Enable default interrupts */ hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN); @@ -1331,6 +1341,9 @@ err_sysfs: static void sti_hdmi_unbind(struct device *dev, struct device *master, void *data) { + struct sti_hdmi *hdmi = dev_get_drvdata(dev); + + cec_notifier_conn_unregister(hdmi->notifier); } static const struct component_ops sti_hdmi_ops = { @@ -1436,10 +1449,6 @@ static int sti_hdmi_probe(struct platform_device *pdev) goto release_adapter; } - hdmi->notifier = cec_notifier_get(&pdev->dev); - if (!hdmi->notifier) - goto release_adapter; - hdmi->reset = devm_reset_control_get(dev, "hdmi"); /* Take hdmi out of reset */ if (!IS_ERR(hdmi->reset)) @@ -1459,14 +1468,11 @@ static int sti_hdmi_remove(struct platform_device *pdev) { struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev); - cec_notifier_set_phys_addr(hdmi->notifier, CEC_PHYS_ADDR_INVALID); - i2c_put_adapter(hdmi->ddc_adapt); if (hdmi->audio_pdev) platform_device_unregister(hdmi->audio_pdev); component_del(&pdev->dev, &sti_hdmi_ops); - cec_notifier_put(hdmi->notifier); return 0; } diff --git a/drivers/gpu/drm/sti/sti_tvout.c b/drivers/gpu/drm/sti/sti_tvout.c index aba79c172512..5767e93dd1cd 100644 --- a/drivers/gpu/drm/sti/sti_tvout.c +++ b/drivers/gpu/drm/sti/sti_tvout.c @@ -157,9 +157,9 @@ static void tvout_write(struct sti_tvout *tvout, u32 val, int offset) * * @tvout: tvout structure * @reg: register to set - * @cr_r: - * @y_g: - * @cb_b: + * @cr_r: red chroma or red order + * @y_g: y or green order + * @cb_b: blue chroma or blue order */ static void tvout_vip_set_color_order(struct sti_tvout *tvout, int reg, u32 cr_r, u32 y_g, u32 cb_b) @@ -214,7 +214,7 @@ static void tvout_vip_set_rnd(struct sti_tvout *tvout, int reg, u32 rnd) * @tvout: tvout structure * @reg: register to set * @main_path: main or auxiliary path - * @sel_input: selected_input (main/aux + conv) + * @video_out: selected_input (main/aux + conv) */ static void tvout_vip_set_sel_input(struct sti_tvout *tvout, int reg, @@ -251,7 +251,7 @@ static void tvout_vip_set_sel_input(struct sti_tvout *tvout, * * @tvout: tvout structure * @reg: 
register to set - * @in_vid_signed: used video input format + * @in_vid_fmt: used video input format */ static void tvout_vip_set_in_vid_fmt(struct sti_tvout *tvout, int reg, u32 in_vid_fmt) diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index ef4009f11396..0b17ac8a3faa 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -121,7 +121,7 @@ struct sti_vtg_sync_params { u32 vsync_off_bot; }; -/** +/* * STI VTG structure * * @regs: register mapping diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c index a03a642c147c..514efefb0016 100644 --- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c +++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c @@ -260,8 +260,11 @@ dw_mipi_dsi_get_lane_mbps(void *priv_data, const struct drm_display_mode *mode, /* Compute requested pll out */ bpp = mipi_dsi_pixel_format_to_bpp(format); pll_out_khz = mode->clock * bpp / lanes; + /* Add 20% to pll out to be higher than pixel bw (burst mode only) */ - pll_out_khz = (pll_out_khz * 12) / 10; + if (mode_flags & MIPI_DSI_MODE_VIDEO_BURST) + pll_out_khz = (pll_out_khz * 12) / 10; + if (pll_out_khz > dsi->lane_max_kbps) { pll_out_khz = dsi->lane_max_kbps; DRM_WARN("Warning max phy mbps is used\n"); diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index 3ab4fbf8eb0d..5b51298921cf 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -15,6 +15,7 @@ #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_graph.h> +#include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> @@ -1040,6 +1041,36 @@ static const struct drm_encoder_funcs ltdc_encoder_funcs = { .destroy = drm_encoder_cleanup, }; +static void ltdc_encoder_disable(struct drm_encoder *encoder) +{ + struct drm_device *ddev = encoder->dev; + + DRM_DEBUG_DRIVER("\n"); + + /* Set to sleep state the pinctrl whatever type of encoder */ + pinctrl_pm_select_sleep_state(ddev->dev); +} + +static void ltdc_encoder_enable(struct drm_encoder *encoder) +{ + struct drm_device *ddev = encoder->dev; + + DRM_DEBUG_DRIVER("\n"); + + /* + * Set to default state the pinctrl only with DPI type. + * Others types like DSI, don't need pinctrl due to + * internal bridge (the signals do not come out of the chipset). 
+ */ + if (encoder->encoder_type == DRM_MODE_ENCODER_DPI) + pinctrl_pm_select_default_state(ddev->dev); +} + +static const struct drm_encoder_helper_funcs ltdc_encoder_helper_funcs = { + .disable = ltdc_encoder_disable, + .enable = ltdc_encoder_enable, +}; + static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge) { struct drm_encoder *encoder; @@ -1055,6 +1086,8 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge) drm_encoder_init(ddev, encoder, <dc_encoder_funcs, DRM_MODE_ENCODER_DPI, NULL); + drm_encoder_helper_add(encoder, <dc_encoder_helper_funcs); + ret = drm_bridge_attach(encoder, bridge, NULL); if (ret) { drm_encoder_cleanup(encoder); @@ -1236,8 +1269,8 @@ int ltdc_load(struct drm_device *ddev) /* Add endpoints panels or bridges if any */ for (i = 0; i < MAX_ENDPOINTS; i++) { if (panel[i]) { - bridge[i] = drm_panel_bridge_add(panel[i], - DRM_MODE_CONNECTOR_DPI); + bridge[i] = drm_panel_bridge_add_typed(panel[i], + DRM_MODE_CONNECTOR_DPI); if (IS_ERR(bridge[i])) { DRM_ERROR("panel-bridge endpoint %d\n", i); ret = PTR_ERR(bridge[i]); @@ -1280,6 +1313,8 @@ int ltdc_load(struct drm_device *ddev) clk_disable_unprepare(ldev->pixel_clk); + pinctrl_pm_select_sleep_state(ddev->dev); + pm_runtime_enable(ddev->dev); return 0; diff --git a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c index eb8071a4d6d0..a7c4654445c7 100644 --- a/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c +++ b/drivers/gpu/drm/sun4i/sun4i_hdmi_enc.c @@ -490,6 +490,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, { struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = data; + struct cec_connector_info conn_info; struct sun4i_drv *drv = drm->dev_private; struct sun4i_hdmi *hdmi; struct resource *res; @@ -629,8 +630,7 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, #ifdef CONFIG_DRM_SUN4I_HDMI_CEC hdmi->cec_adap = cec_pin_allocate_adapter(&sun4i_hdmi_cec_pin_ops, - hdmi, "sun4i", CEC_CAP_TRANSMIT | CEC_CAP_LOG_ADDRS | - CEC_CAP_PASSTHROUGH | CEC_CAP_RC); + hdmi, "sun4i", CEC_CAP_DEFAULTS | CEC_CAP_CONNECTOR_INFO); ret = PTR_ERR_OR_ZERO(hdmi->cec_adap); if (ret < 0) goto err_cleanup_connector; @@ -649,6 +649,8 @@ static int sun4i_hdmi_bind(struct device *dev, struct device *master, "Couldn't initialise the HDMI connector\n"); goto err_cleanup_connector; } + cec_fill_conn_info_from_drm(&conn_info, &hdmi->connector); + cec_s_conn_info(hdmi->cec_adap, &conn_info); /* There is no HPD interrupt, so we need to poll the controller */ hdmi->connector.polled = DRM_CONNECTOR_POLL_CONNECT | diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c index 7fbf425acb55..25ab2ef6d545 100644 --- a/drivers/gpu/drm/sun4i/sun4i_lvds.c +++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c @@ -7,6 +7,7 @@ #include <linux/clk.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index aac56983f208..e74b9eddca01 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -9,6 +9,7 @@ #include <linux/clk.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_of.h> #include <drm/drm_panel.h> #include <drm/drm_print.h> diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index b89439ed210d..42651d737c55 100644 --- 
a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -16,6 +16,7 @@ #include <linux/reset.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_connector.h> #include <drm/drm_crtc.h> #include <drm/drm_encoder.h> diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c index 1636344ba9ec..c958ca9bae63 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c @@ -16,6 +16,7 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> +#include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/slab.h> @@ -365,8 +366,7 @@ static void sun6i_dsi_inst_init(struct sun6i_dsi *dsi, static u16 sun6i_dsi_get_video_start_delay(struct sun6i_dsi *dsi, struct drm_display_mode *mode) { - u16 start = clamp(mode->vtotal - mode->vdisplay - 10, 8, 100); - u16 delay = mode->vtotal - (mode->vsync_end - mode->vdisplay) + start; + u16 delay = mode->vtotal - (mode->vsync_start - mode->vdisplay) + 1; if (delay > mode->vtotal) delay = delay % mode->vtotal; @@ -437,9 +437,9 @@ static void sun6i_dsi_setup_burst(struct sun6i_dsi *dsi, SUN6I_DSI_BURST_LINE_SYNC_POINT(SUN6I_DSI_SYNC_POINT)); val = SUN6I_DSI_TCON_DRQ_ENABLE_MODE; - } else if ((mode->hsync_end - mode->hdisplay) > 20) { + } else if ((mode->hsync_start - mode->hdisplay) > 20) { /* Maaaaaagic */ - u16 drq = (mode->hsync_end - mode->hdisplay) - 20; + u16 drq = (mode->hsync_start - mode->hdisplay) - 20; drq *= mipi_dsi_pixel_format_to_bpp(device->format); drq /= 32; @@ -569,11 +569,12 @@ static void sun6i_dsi_setup_timings(struct sun6i_dsi *dsi, (mode->htotal - mode->hsync_end) * Bpp - HBP_PACKET_OVERHEAD); /* - * The frontporch is set using a blanking packet (4 - * bytes + payload + 2 bytes). Its minimal size is - * therefore 6 bytes + * The frontporch is set using a sync event (4 bytes) + * and two blanking packets (each one is 4 bytes + + * payload + 2 bytes). 
Its minimal size is therefore + * 16 bytes */ -#define HFP_PACKET_OVERHEAD 6 +#define HFP_PACKET_OVERHEAD 16 hfp = max((unsigned int)HFP_PACKET_OVERHEAD, (mode->hsync_start - mode->hdisplay) * Bpp - HFP_PACKET_OVERHEAD); @@ -831,8 +832,8 @@ static u32 sun6i_dsi_dcs_build_pkt_hdr(struct sun6i_dsi *dsi, u32 pkt = msg->type; if (msg->type == MIPI_DSI_DCS_LONG_WRITE) { - pkt |= ((msg->tx_len + 1) & 0xffff) << 8; - pkt |= (((msg->tx_len + 1) >> 8) & 0xffff) << 16; + pkt |= ((msg->tx_len) & 0xffff) << 8; + pkt |= (((msg->tx_len) >> 8) & 0xffff) << 16; } else { pkt |= (((u8 *)msg->tx_buf)[0] << 8); if (msg->tx_len > 1) @@ -1100,6 +1101,12 @@ static int sun6i_dsi_probe(struct platform_device *pdev) return PTR_ERR(base); } + dsi->regulator = devm_regulator_get(dev, "vcc-dsi"); + if (IS_ERR(dsi->regulator)) { + dev_err(dev, "Couldn't get VCC-DSI supply\n"); + return PTR_ERR(dsi->regulator); + } + dsi->regs = devm_regmap_init_mmio_clk(dev, "bus", base, &sun6i_dsi_regmap_config); if (IS_ERR(dsi->regs)) { @@ -1173,6 +1180,13 @@ static int sun6i_dsi_remove(struct platform_device *pdev) static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev) { struct sun6i_dsi *dsi = dev_get_drvdata(dev); + int err; + + err = regulator_enable(dsi->regulator); + if (err) { + dev_err(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err); + return err; + } reset_control_deassert(dsi->reset); clk_prepare_enable(dsi->mod_clk); @@ -1205,6 +1219,7 @@ static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev) clk_disable_unprepare(dsi->mod_clk); reset_control_assert(dsi->reset); + regulator_disable(dsi->regulator); return 0; } diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h index 5c3ad5be0690..3f4846f581ef 100644 --- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h +++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h @@ -23,6 +23,7 @@ struct sun6i_dsi { struct clk *bus_clk; struct clk *mod_clk; struct regmap *regs; + struct regulator *regulator; struct reset_control *reset; struct phy *dphy; diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index a44dca4b0219..e8a317d5ba19 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -226,6 +226,7 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, sun8i_hdmi_phy_init(hdmi->phy); plat_data->mode_valid = hdmi->quirks->mode_valid; + plat_data->use_drm_infoframe = hdmi->quirks->use_drm_infoframe; sun8i_hdmi_phy_set_ops(hdmi->phy, plat_data); platform_set_drvdata(pdev, hdmi); @@ -300,6 +301,7 @@ static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = { static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = { .mode_valid = sun8i_dw_hdmi_mode_valid_h6, + .use_drm_infoframe = true, }; static const struct of_device_id sun8i_dw_hdmi_dt_ids[] = { diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h index d707c9171824..8e64945167e9 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h @@ -179,6 +179,7 @@ struct sun8i_dw_hdmi_quirks { enum drm_mode_status (*mode_valid)(struct drm_connector *connector, const struct drm_display_mode *mode); unsigned int set_rate : 1; + unsigned int use_drm_infoframe : 1; }; struct sun8i_dw_hdmi { diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig index 1d1269fde3c1..5043dcaf1cf9 100644 --- a/drivers/gpu/drm/tegra/Kconfig +++ b/drivers/gpu/drm/tegra/Kconfig @@ -9,7 +9,7 @@ config DRM_TEGRA 
select DRM_MIPI_DSI select DRM_PANEL select TEGRA_HOST1X - select IOMMU_IOVA if IOMMU_SUPPORT + select IOMMU_IOVA select CEC_CORE if CEC_NOTIFIER help Choose this option if you have an NVIDIA Tegra SoC. diff --git a/drivers/gpu/drm/tegra/Makefile b/drivers/gpu/drm/tegra/Makefile index 33c463e8d49f..d6cf202414f0 100644 --- a/drivers/gpu/drm/tegra/Makefile +++ b/drivers/gpu/drm/tegra/Makefile @@ -5,6 +5,7 @@ tegra-drm-y := \ drm.o \ gem.o \ fb.o \ + dp.o \ hub.o \ plane.o \ dc.o \ diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c index fbf57bc3cdab..5b1f9ff97576 100644 --- a/drivers/gpu/drm/tegra/dc.c +++ b/drivers/gpu/drm/tegra/dc.c @@ -715,9 +715,7 @@ static void tegra_plane_atomic_update(struct drm_plane *plane, window.swap = state->swap; for (i = 0; i < fb->format->num_planes; i++) { - struct tegra_bo *bo = tegra_fb_get_plane(fb, i); - - window.base[i] = bo->paddr + fb->offsets[i]; + window.base[i] = state->iova[i] + fb->offsets[i]; /* * Tegra uses a shared stride for UV planes. Framebuffers are @@ -732,6 +730,8 @@ static void tegra_plane_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs tegra_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_plane_atomic_check, .atomic_disable = tegra_plane_atomic_disable, .atomic_update = tegra_plane_atomic_update, @@ -869,11 +869,11 @@ static void tegra_cursor_atomic_update(struct drm_plane *plane, return; } - value |= (bo->paddr >> 10) & 0x3fffff; + value |= (bo->iova >> 10) & 0x3fffff; tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - value = (bo->paddr >> 32) & 0x3; + value = (bo->iova >> 32) & 0x3; tegra_dc_writel(dc, value, DC_DISP_CURSOR_START_ADDR_HI); #endif @@ -914,6 +914,8 @@ static void tegra_cursor_atomic_disable(struct drm_plane *plane, } static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_cursor_atomic_check, .atomic_update = tegra_cursor_atomic_update, .atomic_disable = tegra_cursor_atomic_disable, @@ -2014,9 +2016,8 @@ static int tegra_dc_init(struct host1x_client *client) if (!dc->syncpt) dev_warn(dc->dev, "failed to allocate syncpoint\n"); - dc->group = host1x_client_iommu_attach(client, true); - if (IS_ERR(dc->group)) { - err = PTR_ERR(dc->group); + err = host1x_client_iommu_attach(client); + if (err < 0) { dev_err(client->dev, "failed to attach to domain: %d\n", err); return err; } @@ -2074,6 +2075,12 @@ static int tegra_dc_init(struct host1x_client *client) goto cleanup; } + /* + * Inherit the DMA parameters (such as maximum segment size) from the + * parent device. 
+ */ + client->dev->dma_parms = client->parent->dma_parms; + return 0; cleanup: @@ -2083,7 +2090,7 @@ cleanup: if (!IS_ERR(primary)) drm_plane_cleanup(primary); - host1x_client_iommu_detach(client, dc->group); + host1x_client_iommu_detach(client); host1x_syncpt_free(dc->syncpt); return err; @@ -2097,6 +2104,9 @@ static int tegra_dc_exit(struct host1x_client *client) if (!tegra_dc_has_window_groups(dc)) return 0; + /* avoid a dangling pointer just in case this disappears */ + client->dev->dma_parms = NULL; + devm_free_irq(dc->dev, dc->irq, dc); err = tegra_dc_rgb_exit(dc); @@ -2105,7 +2115,7 @@ static int tegra_dc_exit(struct host1x_client *client) return err; } - host1x_client_iommu_detach(client, dc->group); + host1x_client_iommu_detach(client); host1x_syncpt_free(dc->syncpt); return 0; diff --git a/drivers/gpu/drm/tegra/dc.h b/drivers/gpu/drm/tegra/dc.h index 0c4d17851f47..3d8ddccd758f 100644 --- a/drivers/gpu/drm/tegra/dc.h +++ b/drivers/gpu/drm/tegra/dc.h @@ -90,8 +90,6 @@ struct tegra_dc { struct drm_info_list *debugfs_files; const struct tegra_dc_soc_info *soc; - - struct iommu_group *group; }; static inline struct tegra_dc * diff --git a/drivers/gpu/drm/tegra/dp.c b/drivers/gpu/drm/tegra/dp.c new file mode 100644 index 000000000000..70dfb7d1dec5 --- /dev/null +++ b/drivers/gpu/drm/tegra/dp.c @@ -0,0 +1,876 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright (C) 2013-2019 NVIDIA Corporation + * Copyright (C) 2015 Rob Clark + */ + +#include <drm/drm_crtc.h> +#include <drm/drm_dp_helper.h> +#include <drm/drm_print.h> + +#include "dp.h" + +static const u8 drm_dp_edp_revisions[] = { 0x11, 0x12, 0x13, 0x14 }; + +static void drm_dp_link_caps_reset(struct drm_dp_link_caps *caps) +{ + caps->enhanced_framing = false; + caps->tps3_supported = false; + caps->fast_training = false; + caps->channel_coding = false; + caps->alternate_scrambler_reset = false; +} + +void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest, + const struct drm_dp_link_caps *src) +{ + dest->enhanced_framing = src->enhanced_framing; + dest->tps3_supported = src->tps3_supported; + dest->fast_training = src->fast_training; + dest->channel_coding = src->channel_coding; + dest->alternate_scrambler_reset = src->alternate_scrambler_reset; +} + +static void drm_dp_link_reset(struct drm_dp_link *link) +{ + unsigned int i; + + if (!link) + return; + + link->revision = 0; + link->max_rate = 0; + link->max_lanes = 0; + + drm_dp_link_caps_reset(&link->caps); + link->aux_rd_interval.cr = 0; + link->aux_rd_interval.ce = 0; + link->edp = 0; + + link->rate = 0; + link->lanes = 0; + + for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) + link->rates[i] = 0; + + link->num_rates = 0; +} + +/** + * drm_dp_link_add_rate() - add a rate to the list of supported rates + * @link: the link to add the rate to + * @rate: the rate to add + * + * Add a link rate to the list of supported link rates. 
+ * + * Returns: + * 0 on success or one of the following negative error codes on failure: + * - ENOSPC if the maximum number of supported rates has been reached + * - EEXISTS if the link already supports this rate + * + * See also: + * drm_dp_link_remove_rate() + */ +int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate) +{ + unsigned int i, pivot; + + if (link->num_rates == DP_MAX_SUPPORTED_RATES) + return -ENOSPC; + + for (pivot = 0; pivot < link->num_rates; pivot++) + if (rate <= link->rates[pivot]) + break; + + if (pivot != link->num_rates && rate == link->rates[pivot]) + return -EEXIST; + + for (i = link->num_rates; i > pivot; i--) + link->rates[i] = link->rates[i - 1]; + + link->rates[pivot] = rate; + link->num_rates++; + + return 0; +} + +/** + * drm_dp_link_remove_rate() - remove a rate from the list of supported rates + * @link: the link from which to remove the rate + * @rate: the rate to remove + * + * Removes a link rate from the list of supported link rates. + * + * Returns: + * 0 on success or one of the following negative error codes on failure: + * - EINVAL if the specified rate is not among the supported rates + * + * See also: + * drm_dp_link_add_rate() + */ +int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate) +{ + unsigned int i; + + for (i = 0; i < link->num_rates; i++) + if (rate == link->rates[i]) + break; + + if (i == link->num_rates) + return -EINVAL; + + link->num_rates--; + + while (i < link->num_rates) { + link->rates[i] = link->rates[i + 1]; + i++; + } + + return 0; +} + +/** + * drm_dp_link_update_rates() - normalize the supported link rates array + * @link: the link for which to normalize the supported link rates + * + * Users should call this function after they've manually modified the array + * of supported link rates. This function removes any stale entries, compacts + * the array and updates the supported link rate count. Note that calling the + * drm_dp_link_remove_rate() function already does this janitorial work. + * + * See also: + * drm_dp_link_add_rate(), drm_dp_link_remove_rate() + */ +void drm_dp_link_update_rates(struct drm_dp_link *link) +{ + unsigned int i, count = 0; + + for (i = 0; i < link->num_rates; i++) { + if (link->rates[i] != 0) + link->rates[count++] = link->rates[i]; + } + + for (i = count; i < link->num_rates; i++) + link->rates[i] = 0; + + link->num_rates = count; +} + +/** + * drm_dp_link_probe() - probe a DisplayPort link for capabilities + * @aux: DisplayPort AUX channel + * @link: pointer to structure in which to return link capabilities + * + * The structure filled in by this function can usually be passed directly + * into drm_dp_link_power_up() and drm_dp_link_configure() to power up and + * configure the link based on the link's capabilities. + * + * Returns 0 on success or a negative error code on failure. 
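The rate-list helpers above (drm_dp_link_add_rate(), drm_dp_link_remove_rate(), drm_dp_link_update_rates()) keep link->rates sorted, compacted and free of duplicates. The following standalone userspace sketch mirrors only the insertion logic, to illustrate the resulting array contents; it is not the kernel code itself, and MAX_RATES merely stands in for DP_MAX_SUPPORTED_RATES.

#include <stdio.h>

#define MAX_RATES 8	/* stand-in for DP_MAX_SUPPORTED_RATES */

static unsigned long rates[MAX_RATES];
static unsigned int num_rates;

/* mirrors drm_dp_link_add_rate(): sorted insert, duplicates rejected */
static int add_rate(unsigned long rate)
{
	unsigned int i, pivot;

	if (num_rates == MAX_RATES)
		return -1;	/* -ENOSPC in the kernel helper */

	for (pivot = 0; pivot < num_rates; pivot++)
		if (rate <= rates[pivot])
			break;

	if (pivot != num_rates && rate == rates[pivot])
		return -2;	/* -EEXIST */

	for (i = num_rates; i > pivot; i--)
		rates[i] = rates[i - 1];

	rates[pivot] = rate;
	num_rates++;

	return 0;
}

int main(void)
{
	unsigned int i;

	add_rate(270000);
	add_rate(162000);
	add_rate(540000);
	add_rate(270000);	/* duplicate, rejected */

	/* prints 162000, 270000, 540000: sorted regardless of insert order */
	for (i = 0; i < num_rates; i++)
		printf("%lu kHz\n", rates[i]);

	return 0;
}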
+ */ +int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 dpcd[DP_RECEIVER_CAP_SIZE], value; + unsigned int rd_interval; + int err; + + drm_dp_link_reset(link); + + err = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd)); + if (err < 0) + return err; + + link->revision = dpcd[DP_DPCD_REV]; + link->max_rate = drm_dp_max_link_rate(dpcd); + link->max_lanes = drm_dp_max_lane_count(dpcd); + + link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(dpcd); + link->caps.tps3_supported = drm_dp_tps3_supported(dpcd); + link->caps.fast_training = drm_dp_fast_training_cap(dpcd); + link->caps.channel_coding = drm_dp_channel_coding_supported(dpcd); + + if (drm_dp_alternate_scrambler_reset_cap(dpcd)) { + link->caps.alternate_scrambler_reset = true; + + err = drm_dp_dpcd_readb(aux, DP_EDP_DPCD_REV, &value); + if (err < 0) + return err; + + if (value >= ARRAY_SIZE(drm_dp_edp_revisions)) + DRM_ERROR("unsupported eDP version: %02x\n", value); + else + link->edp = drm_dp_edp_revisions[value]; + } + + /* + * The DPCD stores the AUX read interval in units of 4 ms. There are + * two special cases: + * + * 1) if the TRAINING_AUX_RD_INTERVAL field is 0, the clock recovery + * and channel equalization should use 100 us or 400 us AUX read + * intervals, respectively + * + * 2) for DP v1.4 and above, clock recovery should always use 100 us + * AUX read intervals + */ + rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] & + DP_TRAINING_AUX_RD_MASK; + + if (rd_interval > 4) { + DRM_DEBUG_KMS("AUX interval %u out of range (max. 4)\n", + rd_interval); + rd_interval = 4; + } + + rd_interval *= 4 * USEC_PER_MSEC; + + if (rd_interval == 0 || link->revision >= DP_DPCD_REV_14) + link->aux_rd_interval.cr = 100; + + if (rd_interval == 0) + link->aux_rd_interval.ce = 400; + + link->rate = link->max_rate; + link->lanes = link->max_lanes; + + /* Parse SUPPORTED_LINK_RATES from eDP 1.4 */ + if (link->edp >= 0x14) { + u8 supported_rates[DP_MAX_SUPPORTED_RATES * 2]; + unsigned int i; + u16 rate; + + err = drm_dp_dpcd_read(aux, DP_SUPPORTED_LINK_RATES, + supported_rates, + sizeof(supported_rates)); + if (err < 0) + return err; + + for (i = 0; i < DP_MAX_SUPPORTED_RATES; i++) { + rate = supported_rates[i * 2 + 1] << 8 | + supported_rates[i * 2 + 0]; + + drm_dp_link_add_rate(link, rate * 200); + } + } + + return 0; +} + +/** + * drm_dp_link_power_up() - power up a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + /* + * According to the DP 1.1 specification, a "Sink Device must exit the + * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink + * Control Field" (register 0x600). + */ + usleep_range(1000, 2000); + + return 0; +} + +/** + * drm_dp_link_power_down() - power down a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. 
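Two unit conversions in drm_dp_link_probe() above are worth spelling out: DP_TRAINING_AUX_RD_INTERVAL is stored in units of 4 ms (with 0 meaning 100 us for clock recovery and 400 us for channel equalization), and the eDP 1.4 DP_SUPPORTED_LINK_RATES entries are 16-bit little-endian values in units of 200 kHz. A small worked sketch with illustrative DPCD values, not kernel code:

#include <stdio.h>

int main(void)
{
	/* TRAINING_AUX_RD_INTERVAL: units of 4 ms; example field value of 1 */
	unsigned int rd_interval = 1;
	unsigned long interval_us = rd_interval * 4 * 1000;

	/* a value of 0 would instead mean 100 us (CR) / 400 us (EQ) */
	printf("AUX read interval: %lu us\n", interval_us);	/* 4000 us */

	/* SUPPORTED_LINK_RATES entry: little-endian, units of 200 kHz */
	unsigned char entry[2] = { 0x2a, 0x03 };	/* 0x032a = 810 */
	unsigned int rate = (entry[1] << 8 | entry[0]) * 200;

	printf("link rate: %u kHz\n", rate);	/* 162000 kHz, i.e. RBR */

	return 0;
}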
+ */ +int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < 0x11) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D3; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + return 0; +} + +/** + * drm_dp_link_configure() - configure a DisplayPort link + * @aux: DisplayPort AUX channel + * @link: pointer to a structure containing the link configuration + * + * Returns 0 on success or a negative error code on failure. + */ +int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link) +{ + u8 values[2], value; + int err; + + if (link->ops && link->ops->configure) { + err = link->ops->configure(link); + if (err < 0) { + DRM_ERROR("failed to configure DP link: %d\n", err); + return err; + } + } + + values[0] = drm_dp_link_rate_to_bw_code(link->rate); + values[1] = link->lanes; + + if (link->caps.enhanced_framing) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (err < 0) + return err; + + if (link->caps.channel_coding) + value = DP_SET_ANSI_8B10B; + else + value = 0; + + err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, value); + if (err < 0) + return err; + + if (link->caps.alternate_scrambler_reset) { + err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, + DP_ALTERNATE_SCRAMBLER_RESET_ENABLE); + if (err < 0) + return err; + } + + return 0; +} + +/** + * drm_dp_link_choose() - choose the lowest possible configuration for a mode + * @link: DRM DP link object + * @mode: DRM display mode + * @info: DRM display information + * + * According to the eDP specification, a source should select a configuration + * with the lowest number of lanes and the lowest possible link rate that can + * match the bitrate requirements of a video mode. However it must ensure not + * to exceed the capabilities of the sink. + * + * Returns: 0 on success or a negative error code on failure. + */ +int drm_dp_link_choose(struct drm_dp_link *link, + const struct drm_display_mode *mode, + const struct drm_display_info *info) +{ + /* available link symbol clock rates */ + static const unsigned int rates[3] = { 162000, 270000, 540000 }; + /* available number of lanes */ + static const unsigned int lanes[3] = { 1, 2, 4 }; + unsigned long requirement, capacity; + unsigned int rate = link->max_rate; + unsigned int i, j; + + /* bandwidth requirement */ + requirement = mode->clock * info->bpc * 3; + + for (i = 0; i < ARRAY_SIZE(lanes) && lanes[i] <= link->max_lanes; i++) { + for (j = 0; j < ARRAY_SIZE(rates) && rates[j] <= rate; j++) { + /* + * Capacity for this combination of lanes and rate, + * factoring in the ANSI 8B/10B encoding. + * + * Link rates in the DRM DP helpers are really link + * symbol frequencies, so a tenth of the actual rate + * of the link. + */ + capacity = lanes[i] * (rates[j] * 10) * 8 / 10; + + if (capacity >= requirement) { + DRM_DEBUG_KMS("using %u lanes at %u kHz (%lu/%lu kbps)\n", + lanes[i], rates[j], requirement, + capacity); + link->lanes = lanes[i]; + link->rate = rates[j]; + return 0; + } + } + } + + return -ERANGE; +} + +/** + * DOC: Link training + * + * These functions contain common logic and helpers to implement DisplayPort + * link training. 
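drm_dp_link_choose() above picks the smallest lane count, and within that the lowest rate, whose capacity still covers the mode's bandwidth requirement. A worked example for a 1080p60 mode (148.5 MHz pixel clock, 8 bits per component) using the same formulas; this standalone sketch does not model the sink's advertised maximum lane count and rate, which the kernel helper also bounds against:

#include <stdio.h>

int main(void)
{
	static const unsigned int rates[] = { 162000, 270000, 540000 };
	static const unsigned int lanes[] = { 1, 2, 4 };
	unsigned long requirement, capacity;
	unsigned int i, j;

	/* 1920x1080@60: 148500 kHz pixel clock, 8 bpc, 3 components */
	requirement = 148500UL * 8 * 3;		/* 3564000 kbps */

	for (i = 0; i < 3; i++) {
		for (j = 0; j < 3; j++) {
			/* rate is a symbol clock; 8b/10b leaves 80% for data */
			capacity = lanes[i] * (rates[j] * 10UL) * 8 / 10;

			if (capacity >= requirement) {
				/* prints: 1 lane(s) at 540000 kHz (4320000 >= 3564000 kbps) */
				printf("%u lane(s) at %u kHz (%lu >= %lu kbps)\n",
				       lanes[i], rates[j], capacity, requirement);
				return 0;
			}
		}
	}

	return 1;
}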
+ */ + +/** + * drm_dp_link_train_init() - initialize DisplayPort link training state + * @train: DisplayPort link training state + */ +void drm_dp_link_train_init(struct drm_dp_link_train *train) +{ + struct drm_dp_link_train_set *request = &train->request; + struct drm_dp_link_train_set *adjust = &train->adjust; + unsigned int i; + + for (i = 0; i < 4; i++) { + request->voltage_swing[i] = 0; + adjust->voltage_swing[i] = 0; + + request->pre_emphasis[i] = 0; + adjust->pre_emphasis[i] = 0; + + request->post_cursor[i] = 0; + adjust->post_cursor[i] = 0; + } + + train->pattern = DP_TRAINING_PATTERN_DISABLE; + train->clock_recovered = false; + train->channel_equalized = false; +} + +static bool drm_dp_link_train_valid(const struct drm_dp_link_train *train) +{ + return train->clock_recovered && train->channel_equalized; +} + +static int drm_dp_link_apply_training(struct drm_dp_link *link) +{ + struct drm_dp_link_train_set *request = &link->train.request; + unsigned int lanes = link->lanes, *vs, *pe, *pc, i; + struct drm_dp_aux *aux = link->aux; + u8 values[4], pattern = 0; + int err; + + err = link->ops->apply_training(link); + if (err < 0) { + DRM_ERROR("failed to apply link training: %d\n", err); + return err; + } + + vs = request->voltage_swing; + pe = request->pre_emphasis; + pc = request->post_cursor; + + /* write currently selected voltage-swing and pre-emphasis levels */ + for (i = 0; i < lanes; i++) + values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) | + DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]); + + err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, lanes); + if (err < 0) { + DRM_ERROR("failed to set training parameters: %d\n", err); + return err; + } + + /* write currently selected post-cursor level (if supported) */ + if (link->revision >= 0x12 && link->rate == 540000) { + values[0] = values[1] = 0; + + for (i = 0; i < lanes; i++) + values[i / 2] |= DP_LANE_POST_CURSOR(i, pc[i]); + + err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_1_SET2, values, + DIV_ROUND_UP(lanes, 2)); + if (err < 0) { + DRM_ERROR("failed to set post-cursor: %d\n", err); + return err; + } + } + + /* write link pattern */ + if (link->train.pattern != DP_TRAINING_PATTERN_DISABLE) + pattern |= DP_LINK_SCRAMBLING_DISABLE; + + pattern |= link->train.pattern; + + err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern); + if (err < 0) { + DRM_ERROR("failed to set training pattern: %d\n", err); + return err; + } + + return 0; +} + +static void drm_dp_link_train_wait(struct drm_dp_link *link) +{ + unsigned long min = 0; + + switch (link->train.pattern) { + case DP_TRAINING_PATTERN_1: + min = link->aux_rd_interval.cr; + break; + + case DP_TRAINING_PATTERN_2: + case DP_TRAINING_PATTERN_3: + min = link->aux_rd_interval.ce; + break; + + default: + break; + } + + if (min > 0) + usleep_range(min, 2 * min); +} + +static void drm_dp_link_get_adjustments(struct drm_dp_link *link, + u8 status[DP_LINK_STATUS_SIZE]) +{ + struct drm_dp_link_train_set *adjust = &link->train.adjust; + unsigned int i; + + for (i = 0; i < link->lanes; i++) { + adjust->voltage_swing[i] = + drm_dp_get_adjust_request_voltage(status, i) >> + DP_TRAIN_VOLTAGE_SWING_SHIFT; + + adjust->pre_emphasis[i] = + drm_dp_get_adjust_request_pre_emphasis(status, i) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + + adjust->post_cursor[i] = + drm_dp_get_adjust_request_post_cursor(status, i); + } +} + +static void drm_dp_link_train_adjust(struct drm_dp_link_train *train) +{ + struct drm_dp_link_train_set *request = &train->request; + struct drm_dp_link_train_set *adjust = 
&train->adjust; + unsigned int i; + + for (i = 0; i < 4; i++) + if (request->voltage_swing[i] != adjust->voltage_swing[i]) + request->voltage_swing[i] = adjust->voltage_swing[i]; + + for (i = 0; i < 4; i++) + if (request->pre_emphasis[i] != adjust->pre_emphasis[i]) + request->pre_emphasis[i] = adjust->pre_emphasis[i]; + + for (i = 0; i < 4; i++) + if (request->post_cursor[i] != adjust->post_cursor[i]) + request->post_cursor[i] = adjust->post_cursor[i]; +} + +static int drm_dp_link_recover_clock(struct drm_dp_link *link) +{ + u8 status[DP_LINK_STATUS_SIZE]; + int err; + + err = drm_dp_link_apply_training(link); + if (err < 0) + return err; + + drm_dp_link_train_wait(link); + + err = drm_dp_dpcd_read_link_status(link->aux, status); + if (err < 0) { + DRM_ERROR("failed to read link status: %d\n", err); + return err; + } + + if (!drm_dp_clock_recovery_ok(status, link->lanes)) + drm_dp_link_get_adjustments(link, status); + else + link->train.clock_recovered = true; + + return 0; +} + +static int drm_dp_link_clock_recovery(struct drm_dp_link *link) +{ + unsigned int repeat; + int err; + + /* start clock recovery using training pattern 1 */ + link->train.pattern = DP_TRAINING_PATTERN_1; + + for (repeat = 1; repeat < 5; repeat++) { + err = drm_dp_link_recover_clock(link); + if (err < 0) { + DRM_ERROR("failed to recover clock: %d\n", err); + return err; + } + + if (link->train.clock_recovered) + break; + + drm_dp_link_train_adjust(&link->train); + } + + return 0; +} + +static int drm_dp_link_equalize_channel(struct drm_dp_link *link) +{ + struct drm_dp_aux *aux = link->aux; + u8 status[DP_LINK_STATUS_SIZE]; + int err; + + err = drm_dp_link_apply_training(link); + if (err < 0) + return err; + + drm_dp_link_train_wait(link); + + err = drm_dp_dpcd_read_link_status(aux, status); + if (err < 0) { + DRM_ERROR("failed to read link status: %d\n", err); + return err; + } + + if (!drm_dp_clock_recovery_ok(status, link->lanes)) { + DRM_ERROR("clock recovery lost while equalizing channel\n"); + link->train.clock_recovered = false; + return 0; + } + + if (!drm_dp_channel_eq_ok(status, link->lanes)) + drm_dp_link_get_adjustments(link, status); + else + link->train.channel_equalized = true; + + return 0; +} + +static int drm_dp_link_channel_equalization(struct drm_dp_link *link) +{ + unsigned int repeat; + int err; + + /* start channel equalization using pattern 2 or 3 */ + if (link->caps.tps3_supported) + link->train.pattern = DP_TRAINING_PATTERN_3; + else + link->train.pattern = DP_TRAINING_PATTERN_2; + + for (repeat = 1; repeat < 5; repeat++) { + err = drm_dp_link_equalize_channel(link); + if (err < 0) { + DRM_ERROR("failed to equalize channel: %d\n", err); + return err; + } + + if (link->train.channel_equalized) + break; + + drm_dp_link_train_adjust(&link->train); + } + + return 0; +} + +static int drm_dp_link_downgrade(struct drm_dp_link *link) +{ + switch (link->rate) { + case 162000: + return -EINVAL; + + case 270000: + link->rate = 162000; + break; + + case 540000: + link->rate = 270000; + return 0; + } + + return 0; +} + +static void drm_dp_link_train_disable(struct drm_dp_link *link) +{ + int err; + + link->train.pattern = DP_TRAINING_PATTERN_DISABLE; + + err = drm_dp_link_apply_training(link); + if (err < 0) + DRM_ERROR("failed to disable link training: %d\n", err); +} + +static int drm_dp_link_train_full(struct drm_dp_link *link) +{ + int err; + +retry: + DRM_DEBUG_KMS("full-training link: %u lane%s at %u MHz\n", + link->lanes, (link->lanes > 1) ? 
"s" : "", + link->rate / 100); + + err = drm_dp_link_configure(link->aux, link); + if (err < 0) { + DRM_ERROR("failed to configure DP link: %d\n", err); + return err; + } + + err = drm_dp_link_clock_recovery(link); + if (err < 0) { + DRM_ERROR("clock recovery failed: %d\n", err); + goto out; + } + + if (!link->train.clock_recovered) { + DRM_ERROR("clock recovery failed, downgrading link\n"); + + err = drm_dp_link_downgrade(link); + if (err < 0) + goto out; + + goto retry; + } + + DRM_DEBUG_KMS("clock recovery succeeded\n"); + + err = drm_dp_link_channel_equalization(link); + if (err < 0) { + DRM_ERROR("channel equalization failed: %d\n", err); + goto out; + } + + if (!link->train.channel_equalized) { + DRM_ERROR("channel equalization failed, downgrading link\n"); + + err = drm_dp_link_downgrade(link); + if (err < 0) + goto out; + + goto retry; + } + + DRM_DEBUG_KMS("channel equalization succeeded\n"); + +out: + drm_dp_link_train_disable(link); + return err; +} + +static int drm_dp_link_train_fast(struct drm_dp_link *link) +{ + u8 status[DP_LINK_STATUS_SIZE]; + int err; + + DRM_DEBUG_KMS("fast-training link: %u lane%s at %u MHz\n", + link->lanes, (link->lanes > 1) ? "s" : "", + link->rate / 100); + + err = drm_dp_link_configure(link->aux, link); + if (err < 0) { + DRM_ERROR("failed to configure DP link: %d\n", err); + return err; + } + + /* transmit training pattern 1 for 500 microseconds */ + link->train.pattern = DP_TRAINING_PATTERN_1; + + err = drm_dp_link_apply_training(link); + if (err < 0) + goto out; + + usleep_range(500, 1000); + + /* transmit training pattern 2 or 3 for 500 microseconds */ + if (link->caps.tps3_supported) + link->train.pattern = DP_TRAINING_PATTERN_3; + else + link->train.pattern = DP_TRAINING_PATTERN_2; + + err = drm_dp_link_apply_training(link); + if (err < 0) + goto out; + + usleep_range(500, 1000); + + err = drm_dp_dpcd_read_link_status(link->aux, status); + if (err < 0) { + DRM_ERROR("failed to read link status: %d\n", err); + goto out; + } + + if (!drm_dp_clock_recovery_ok(status, link->lanes)) { + DRM_ERROR("clock recovery failed\n"); + err = -EIO; + } + + if (!drm_dp_channel_eq_ok(status, link->lanes)) { + DRM_ERROR("channel equalization failed\n"); + err = -EIO; + } + +out: + drm_dp_link_train_disable(link); + return err; +} + +/** + * drm_dp_link_train() - perform DisplayPort link training + * @link: a DP link object + * + * Uses the context stored in the DP link object to perform link training. It + * is expected that drivers will call drm_dp_link_probe() to obtain the link + * capabilities before performing link training. + * + * If the sink supports fast link training (no AUX CH handshake) and valid + * training settings are available, this function will try to perform fast + * link training and fall back to full link training on failure. + * + * Returns: 0 on success or a negative error code on failure. 
+ */ +int drm_dp_link_train(struct drm_dp_link *link) +{ + int err; + + drm_dp_link_train_init(&link->train); + + if (link->caps.fast_training) { + if (drm_dp_link_train_valid(&link->train)) { + err = drm_dp_link_train_fast(link); + if (err < 0) + DRM_ERROR("fast link training failed: %d\n", + err); + else + return 0; + } else { + DRM_DEBUG_KMS("training parameters not available\n"); + } + } else { + DRM_DEBUG_KMS("fast link training not supported\n"); + } + + err = drm_dp_link_train_full(link); + if (err < 0) + DRM_ERROR("full link training failed: %d\n", err); + + return err; +} diff --git a/drivers/gpu/drm/tegra/dp.h b/drivers/gpu/drm/tegra/dp.h new file mode 100644 index 000000000000..cb12ed0c54e7 --- /dev/null +++ b/drivers/gpu/drm/tegra/dp.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright (C) 2013-2019 NVIDIA Corporation. + * Copyright (C) 2015 Rob Clark + */ + +#ifndef DRM_TEGRA_DP_H +#define DRM_TEGRA_DP_H 1 + +#include <linux/types.h> + +struct drm_display_info; +struct drm_display_mode; +struct drm_dp_aux; +struct drm_dp_link; + +/** + * struct drm_dp_link_caps - DP link capabilities + */ +struct drm_dp_link_caps { + /** + * @enhanced_framing: + * + * enhanced framing capability (mandatory as of DP 1.2) + */ + bool enhanced_framing; + + /** + * tps3_supported: + * + * training pattern sequence 3 supported for equalization + */ + bool tps3_supported; + + /** + * @fast_training: + * + * AUX CH handshake not required for link training + */ + bool fast_training; + + /** + * @channel_coding: + * + * ANSI 8B/10B channel coding capability + */ + bool channel_coding; + + /** + * @alternate_scrambler_reset: + * + * eDP alternate scrambler reset capability + */ + bool alternate_scrambler_reset; +}; + +void drm_dp_link_caps_copy(struct drm_dp_link_caps *dest, + const struct drm_dp_link_caps *src); + +/** + * struct drm_dp_link_ops - DP link operations + */ +struct drm_dp_link_ops { + /** + * @apply_training: + */ + int (*apply_training)(struct drm_dp_link *link); + + /** + * @configure: + */ + int (*configure)(struct drm_dp_link *link); +}; + +#define DP_TRAIN_VOLTAGE_SWING_LEVEL(x) ((x) << 0) +#define DP_TRAIN_PRE_EMPHASIS_LEVEL(x) ((x) << 3) +#define DP_LANE_POST_CURSOR(i, x) (((x) & 0x3) << (((i) & 1) << 2)) + +/** + * struct drm_dp_link_train_set - link training settings + * @voltage_swing: per-lane voltage swing + * @pre_emphasis: per-lane pre-emphasis + * @post_cursor: per-lane post-cursor + */ +struct drm_dp_link_train_set { + unsigned int voltage_swing[4]; + unsigned int pre_emphasis[4]; + unsigned int post_cursor[4]; +}; + +/** + * struct drm_dp_link_train - link training state information + * @request: currently requested settings + * @adjust: adjustments requested by sink + * @pattern: currently requested training pattern + * @clock_recovered: flag to track if clock recovery has completed + * @channel_equalized: flag to track if channel equalization has completed + */ +struct drm_dp_link_train { + struct drm_dp_link_train_set request; + struct drm_dp_link_train_set adjust; + + unsigned int pattern; + + bool clock_recovered; + bool channel_equalized; +}; + +/** + * struct drm_dp_link - DP link capabilities and configuration + * @revision: DP specification revision supported on the link + * @max_rate: maximum clock rate supported on the link + * @max_lanes: maximum number of lanes supported on the link + * @caps: capabilities supported on the link (see &drm_dp_link_caps) + * @aux_rd_interval: AUX read interval to use for training (in microseconds) + * @edp: 
eDP revision (0x11: eDP 1.1, 0x12: eDP 1.2, ...) + * @rate: currently configured link rate + * @lanes: currently configured number of lanes + * @rates: additional supported link rates in kHz (eDP 1.4) + * @num_rates: number of additional supported link rates (eDP 1.4) + */ +struct drm_dp_link { + unsigned char revision; + unsigned int max_rate; + unsigned int max_lanes; + + struct drm_dp_link_caps caps; + + /** + * @cr: clock recovery read interval + * @ce: channel equalization read interval + */ + struct { + unsigned int cr; + unsigned int ce; + } aux_rd_interval; + + unsigned char edp; + + unsigned int rate; + unsigned int lanes; + + unsigned long rates[DP_MAX_SUPPORTED_RATES]; + unsigned int num_rates; + + /** + * @ops: DP link operations + */ + const struct drm_dp_link_ops *ops; + + /** + * @aux: DP AUX channel + */ + struct drm_dp_aux *aux; + + /** + * @train: DP link training state + */ + struct drm_dp_link_train train; +}; + +int drm_dp_link_add_rate(struct drm_dp_link *link, unsigned long rate); +int drm_dp_link_remove_rate(struct drm_dp_link *link, unsigned long rate); +void drm_dp_link_update_rates(struct drm_dp_link *link); + +int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link); +int drm_dp_link_choose(struct drm_dp_link *link, + const struct drm_display_mode *mode, + const struct drm_display_info *info); + +void drm_dp_link_train_init(struct drm_dp_link_train *train); +int drm_dp_link_train(struct drm_dp_link *link); + +#endif diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c index a0f6f9b0d258..622cdf1ad246 100644 --- a/drivers/gpu/drm/tegra/dpaux.c +++ b/drivers/gpu/drm/tegra/dpaux.c @@ -9,6 +9,7 @@ #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinctrl.h> @@ -22,6 +23,7 @@ #include <drm/drm_dp_helper.h> #include <drm/drm_panel.h> +#include "dp.h" #include "dpaux.h" #include "drm.h" #include "trace.h" @@ -29,10 +31,18 @@ static DEFINE_MUTEX(dpaux_lock); static LIST_HEAD(dpaux_list); +struct tegra_dpaux_soc { + unsigned int cmh; + unsigned int drvz; + unsigned int drvi; +}; + struct tegra_dpaux { struct drm_dp_aux aux; struct device *dev; + const struct tegra_dpaux_soc *soc; + void __iomem *regs; int irq; @@ -120,6 +130,7 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux, struct tegra_dpaux *dpaux = to_dpaux(aux); unsigned long status; ssize_t ret = 0; + u8 reply = 0; u32 value; /* Tegra has 4x4 byte DP AUX transmit and receive FIFOs. 
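For reference, the per-lane drive settings written by drm_dp_link_apply_training() in the dp.c hunk above are packed with the DP_TRAIN_VOLTAGE_SWING_LEVEL() and DP_TRAIN_PRE_EMPHASIS_LEVEL() macros from dp.h: voltage swing lands in bits 1:0 and pre-emphasis in bits 4:3 of each DP_TRAINING_LANEx_SET byte. A minimal packing sketch with example levels (macro definitions copied from the dp.h hunk above, everything else illustrative):

#include <stdio.h>

/* copied from the dp.h hunk above */
#define DP_TRAIN_VOLTAGE_SWING_LEVEL(x)	((x) << 0)
#define DP_TRAIN_PRE_EMPHASIS_LEVEL(x)	((x) << 3)

int main(void)
{
	/* example requests for a 2-lane link */
	unsigned int vs[2] = { 2, 2 };	/* voltage-swing levels */
	unsigned int pe[2] = { 1, 1 };	/* pre-emphasis levels */
	unsigned char values[2];
	unsigned int i;

	for (i = 0; i < 2; i++)
		values[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL(vs[i]) |
			    DP_TRAIN_PRE_EMPHASIS_LEVEL(pe[i]);

	/* prints 0x0a per lane: swing 2 in bits 1:0, pre-emphasis 1 in bits 4:3 */
	for (i = 0; i < 2; i++)
		printf("DP_TRAINING_LANE%u_SET = 0x%02x\n", i, values[i]);

	return 0;
}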
*/ @@ -214,23 +225,23 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux, switch ((value & DPAUX_DP_AUXSTAT_REPLY_TYPE_MASK) >> 16) { case 0x00: - msg->reply = DP_AUX_NATIVE_REPLY_ACK; + reply = DP_AUX_NATIVE_REPLY_ACK; break; case 0x01: - msg->reply = DP_AUX_NATIVE_REPLY_NACK; + reply = DP_AUX_NATIVE_REPLY_NACK; break; case 0x02: - msg->reply = DP_AUX_NATIVE_REPLY_DEFER; + reply = DP_AUX_NATIVE_REPLY_DEFER; break; case 0x04: - msg->reply = DP_AUX_I2C_REPLY_NACK; + reply = DP_AUX_I2C_REPLY_NACK; break; case 0x08: - msg->reply = DP_AUX_I2C_REPLY_DEFER; + reply = DP_AUX_I2C_REPLY_DEFER; break; } @@ -238,14 +249,24 @@ static ssize_t tegra_dpaux_transfer(struct drm_dp_aux *aux, if (msg->request & DP_AUX_I2C_READ) { size_t count = value & DPAUX_DP_AUXSTAT_REPLY_MASK; - if (WARN_ON(count != msg->size)) - count = min_t(size_t, count, msg->size); + /* + * There might be a smarter way to do this, but since + * the DP helpers will already retry transactions for + * an -EBUSY return value, simply reuse that instead. + */ + if (count != msg->size) { + ret = -EBUSY; + goto out; + } tegra_dpaux_read_fifo(dpaux, msg->buffer, count); ret = count; } } + msg->reply = reply; + +out: return ret; } @@ -310,9 +331,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function) switch (function) { case DPAUX_PADCTL_FUNC_AUX: - value = DPAUX_HYBRID_PADCTL_AUX_CMH(2) | - DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) | - DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) | + value = DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) | + DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) | + DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) | DPAUX_HYBRID_PADCTL_AUX_INPUT_RCV | DPAUX_HYBRID_PADCTL_MODE_AUX; break; @@ -320,9 +341,9 @@ static int tegra_dpaux_pad_config(struct tegra_dpaux *dpaux, unsigned function) case DPAUX_PADCTL_FUNC_I2C: value = DPAUX_HYBRID_PADCTL_I2C_SDA_INPUT_RCV | DPAUX_HYBRID_PADCTL_I2C_SCL_INPUT_RCV | - DPAUX_HYBRID_PADCTL_AUX_CMH(2) | - DPAUX_HYBRID_PADCTL_AUX_DRVZ(4) | - DPAUX_HYBRID_PADCTL_AUX_DRVI(0x18) | + DPAUX_HYBRID_PADCTL_AUX_CMH(dpaux->soc->cmh) | + DPAUX_HYBRID_PADCTL_AUX_DRVZ(dpaux->soc->drvz) | + DPAUX_HYBRID_PADCTL_AUX_DRVI(dpaux->soc->drvi) | DPAUX_HYBRID_PADCTL_MODE_I2C; break; @@ -436,6 +457,7 @@ static int tegra_dpaux_probe(struct platform_device *pdev) if (!dpaux) return -ENOMEM; + dpaux->soc = of_device_get_match_data(&pdev->dev); INIT_WORK(&dpaux->work, tegra_dpaux_hotplug); init_completion(&dpaux->complete); INIT_LIST_HEAD(&dpaux->list); @@ -493,6 +515,8 @@ static int tegra_dpaux_probe(struct platform_device *pdev) return PTR_ERR(dpaux->vdd); } + + dpaux->vdd = NULL; } platform_set_drvdata(pdev, dpaux); @@ -641,11 +665,29 @@ static const struct dev_pm_ops tegra_dpaux_pm_ops = { SET_RUNTIME_PM_OPS(tegra_dpaux_suspend, tegra_dpaux_resume, NULL) }; +static const struct tegra_dpaux_soc tegra124_dpaux_soc = { + .cmh = 0x02, + .drvz = 0x04, + .drvi = 0x18, +}; + +static const struct tegra_dpaux_soc tegra210_dpaux_soc = { + .cmh = 0x02, + .drvz = 0x04, + .drvi = 0x30, +}; + +static const struct tegra_dpaux_soc tegra194_dpaux_soc = { + .cmh = 0x02, + .drvz = 0x04, + .drvi = 0x2c, +}; + static const struct of_device_id tegra_dpaux_of_match[] = { - { .compatible = "nvidia,tegra194-dpaux", }, - { .compatible = "nvidia,tegra186-dpaux", }, - { .compatible = "nvidia,tegra210-dpaux", }, - { .compatible = "nvidia,tegra124-dpaux", }, + { .compatible = "nvidia,tegra194-dpaux", .data = &tegra194_dpaux_soc }, + { .compatible = "nvidia,tegra186-dpaux", .data = &tegra210_dpaux_soc }, + { .compatible = 
"nvidia,tegra210-dpaux", .data = &tegra210_dpaux_soc }, + { .compatible = "nvidia,tegra124-dpaux", .data = &tegra124_dpaux_soc }, { }, }; MODULE_DEVICE_TABLE(of, tegra_dpaux_of_match); @@ -686,25 +728,32 @@ int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output) output->connector.polled = DRM_CONNECTOR_POLL_HPD; dpaux->output = output; - err = regulator_enable(dpaux->vdd); - if (err < 0) - return err; + if (output->panel) { + enum drm_connector_status status; - timeout = jiffies + msecs_to_jiffies(250); + if (dpaux->vdd) { + err = regulator_enable(dpaux->vdd); + if (err < 0) + return err; + } - while (time_before(jiffies, timeout)) { - enum drm_connector_status status; + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + status = drm_dp_aux_detect(aux); + + if (status == connector_status_connected) + break; - status = drm_dp_aux_detect(aux); - if (status == connector_status_connected) { - enable_irq(dpaux->irq); - return 0; + usleep_range(1000, 2000); } - usleep_range(1000, 2000); + if (status != connector_status_connected) + return -ETIMEDOUT; } - return -ETIMEDOUT; + enable_irq(dpaux->irq); + return 0; } int drm_dp_aux_detach(struct drm_dp_aux *aux) @@ -715,25 +764,33 @@ int drm_dp_aux_detach(struct drm_dp_aux *aux) disable_irq(dpaux->irq); - err = regulator_disable(dpaux->vdd); - if (err < 0) - return err; + if (dpaux->output->panel) { + enum drm_connector_status status; - timeout = jiffies + msecs_to_jiffies(250); + if (dpaux->vdd) { + err = regulator_disable(dpaux->vdd); + if (err < 0) + return err; + } - while (time_before(jiffies, timeout)) { - enum drm_connector_status status; + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + status = drm_dp_aux_detect(aux); + + if (status == connector_status_disconnected) + break; - status = drm_dp_aux_detect(aux); - if (status == connector_status_disconnected) { - dpaux->output = NULL; - return 0; + usleep_range(1000, 2000); } - usleep_range(1000, 2000); + if (status != connector_status_disconnected) + return -ETIMEDOUT; + + dpaux->output = NULL; } - return -ETIMEDOUT; + return 0; } enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux) @@ -764,72 +821,3 @@ int drm_dp_aux_disable(struct drm_dp_aux *aux) return 0; } - -int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding) -{ - int err; - - err = drm_dp_dpcd_writeb(aux, DP_MAIN_LINK_CHANNEL_CODING_SET, - encoding); - if (err < 0) - return err; - - return 0; -} - -int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link, - u8 pattern) -{ - u8 tp = pattern & DP_TRAINING_PATTERN_MASK; - u8 status[DP_LINK_STATUS_SIZE], values[4]; - unsigned int i; - int err; - - err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, pattern); - if (err < 0) - return err; - - if (tp == DP_TRAINING_PATTERN_DISABLE) - return 0; - - for (i = 0; i < link->num_lanes; i++) - values[i] = DP_TRAIN_MAX_PRE_EMPHASIS_REACHED | - DP_TRAIN_PRE_EMPH_LEVEL_0 | - DP_TRAIN_MAX_SWING_REACHED | - DP_TRAIN_VOLTAGE_SWING_LEVEL_0; - - err = drm_dp_dpcd_write(aux, DP_TRAINING_LANE0_SET, values, - link->num_lanes); - if (err < 0) - return err; - - usleep_range(500, 1000); - - err = drm_dp_dpcd_read_link_status(aux, status); - if (err < 0) - return err; - - switch (tp) { - case DP_TRAINING_PATTERN_1: - if (!drm_dp_clock_recovery_ok(status, link->num_lanes)) - return -EAGAIN; - - break; - - case DP_TRAINING_PATTERN_2: - if (!drm_dp_channel_eq_ok(status, link->num_lanes)) - return -EAGAIN; - - break; - - default: - 
dev_err(aux->dev, "unsupported training pattern %u\n", tp); - return -EINVAL; - } - - err = drm_dp_dpcd_writeb(aux, DP_EDP_CONFIGURATION_SET, 0); - if (err < 0) - return err; - - return 0; -} diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 6fb7d74ff553..56e5e7a5c108 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -20,10 +20,6 @@ #include <drm/drm_prime.h> #include <drm/drm_vblank.h> -#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) -#include <asm/dma-iommu.h> -#endif - #include "drm.h" #include "gem.h" @@ -86,168 +82,6 @@ tegra_drm_mode_config_helpers = { .atomic_commit_tail = tegra_atomic_commit_tail, }; -static int tegra_drm_load(struct drm_device *drm, unsigned long flags) -{ - struct host1x_device *device = to_host1x_device(drm->dev); - struct tegra_drm *tegra; - int err; - - tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); - if (!tegra) - return -ENOMEM; - - if (iommu_present(&platform_bus_type)) { - tegra->domain = iommu_domain_alloc(&platform_bus_type); - if (!tegra->domain) { - err = -ENOMEM; - goto free; - } - - err = iova_cache_get(); - if (err < 0) - goto domain; - } - - mutex_init(&tegra->clients_lock); - INIT_LIST_HEAD(&tegra->clients); - - drm->dev_private = tegra; - tegra->drm = drm; - - drm_mode_config_init(drm); - - drm->mode_config.min_width = 0; - drm->mode_config.min_height = 0; - - drm->mode_config.max_width = 4096; - drm->mode_config.max_height = 4096; - - drm->mode_config.allow_fb_modifiers = true; - - drm->mode_config.normalize_zpos = true; - - drm->mode_config.funcs = &tegra_drm_mode_config_funcs; - drm->mode_config.helper_private = &tegra_drm_mode_config_helpers; - - err = tegra_drm_fb_prepare(drm); - if (err < 0) - goto config; - - drm_kms_helper_poll_init(drm); - - err = host1x_device_init(device); - if (err < 0) - goto fbdev; - - if (tegra->domain) { - u64 carveout_start, carveout_end, gem_start, gem_end; - u64 dma_mask = dma_get_mask(&device->dev); - dma_addr_t start, end; - unsigned long order; - - start = tegra->domain->geometry.aperture_start & dma_mask; - end = tegra->domain->geometry.aperture_end & dma_mask; - - gem_start = start; - gem_end = end - CARVEOUT_SZ; - carveout_start = gem_end + 1; - carveout_end = end; - - order = __ffs(tegra->domain->pgsize_bitmap); - init_iova_domain(&tegra->carveout.domain, 1UL << order, - carveout_start >> order); - - tegra->carveout.shift = iova_shift(&tegra->carveout.domain); - tegra->carveout.limit = carveout_end >> tegra->carveout.shift; - - drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1); - mutex_init(&tegra->mm_lock); - - DRM_DEBUG("IOMMU apertures:\n"); - DRM_DEBUG(" GEM: %#llx-%#llx\n", gem_start, gem_end); - DRM_DEBUG(" Carveout: %#llx-%#llx\n", carveout_start, - carveout_end); - } - - if (tegra->hub) { - err = tegra_display_hub_prepare(tegra->hub); - if (err < 0) - goto device; - } - - /* - * We don't use the drm_irq_install() helpers provided by the DRM - * core, so we need to set this manually in order to allow the - * DRM_IOCTL_WAIT_VBLANK to operate correctly. 
- */ - drm->irq_enabled = true; - - /* syncpoints are used for full 32-bit hardware VBLANK counters */ - drm->max_vblank_count = 0xffffffff; - - err = drm_vblank_init(drm, drm->mode_config.num_crtc); - if (err < 0) - goto hub; - - drm_mode_config_reset(drm); - - err = tegra_drm_fb_init(drm); - if (err < 0) - goto hub; - - return 0; - -hub: - if (tegra->hub) - tegra_display_hub_cleanup(tegra->hub); -device: - host1x_device_exit(device); -fbdev: - drm_kms_helper_poll_fini(drm); - tegra_drm_fb_free(drm); -config: - drm_mode_config_cleanup(drm); - - if (tegra->domain) { - mutex_destroy(&tegra->mm_lock); - drm_mm_takedown(&tegra->mm); - put_iova_domain(&tegra->carveout.domain); - iova_cache_put(); - } -domain: - if (tegra->domain) - iommu_domain_free(tegra->domain); -free: - kfree(tegra); - return err; -} - -static void tegra_drm_unload(struct drm_device *drm) -{ - struct host1x_device *device = to_host1x_device(drm->dev); - struct tegra_drm *tegra = drm->dev_private; - int err; - - drm_kms_helper_poll_fini(drm); - tegra_drm_fb_exit(drm); - drm_atomic_helper_shutdown(drm); - drm_mode_config_cleanup(drm); - - err = host1x_device_exit(device); - if (err < 0) - return; - - if (tegra->domain) { - mutex_destroy(&tegra->mm_lock); - drm_mm_takedown(&tegra->mm); - put_iova_domain(&tegra->carveout.domain); - iova_cache_put(); - iommu_domain_free(tegra->domain); - } - - kfree(tegra); -} - static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp) { struct tegra_drm_file *fpriv; @@ -311,6 +145,8 @@ static int host1x_reloc_copy_from_user(struct host1x_reloc *dest, if (err < 0) return err; + dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE; + dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf); if (!dest->cmdbuf.bo) return -ENOENT; @@ -1014,8 +850,6 @@ static int tegra_debugfs_init(struct drm_minor *minor) static struct drm_driver tegra_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC | DRIVER_RENDER, - .load = tegra_drm_load, - .unload = tegra_drm_unload, .open = tegra_drm_open, .postclose = tegra_drm_postclose, .lastclose = drm_fb_helper_lastclose, @@ -1068,57 +902,63 @@ int tegra_drm_unregister_client(struct tegra_drm *tegra, return 0; } -struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client, - bool shared) +int host1x_client_iommu_attach(struct host1x_client *client) { + struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev); struct drm_device *drm = dev_get_drvdata(client->parent); struct tegra_drm *tegra = drm->dev_private; struct iommu_group *group = NULL; int err; + /* + * If the host1x client is already attached to an IOMMU domain that is + * not the shared IOMMU domain, don't try to attach it to a different + * domain. This allows using the IOMMU-backed DMA API. 
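The comment at the end of the hunk above describes the new attach policy: a host1x client that already sits in its own (DMA API) IOMMU domain is left alone, and only clients without such a domain are attached to the shared Tegra DRM domain. A standalone model of that decision, purely illustrative and not the kernel code:

#include <stdbool.h>
#include <stdio.h>

static const char *attach_decision(bool has_other_domain,
				   bool shared_domain_exists)
{
	/* client already attached to a different domain: keep the DMA API */
	if (has_other_domain)
		return "keep existing DMA-API domain";

	/* otherwise attach its group to the shared domain, if one was set up */
	if (shared_domain_exists)
		return "attach group to shared domain (explicit IOMMU)";

	return "no IOMMU in use";
}

int main(void)
{
	printf("%s\n", attach_decision(true, true));
	printf("%s\n", attach_decision(false, true));
	printf("%s\n", attach_decision(false, false));
	return 0;
}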
+ */ + if (domain && domain != tegra->domain) + return 0; + if (tegra->domain) { group = iommu_group_get(client->dev); if (!group) { dev_err(client->dev, "failed to get IOMMU group\n"); - return ERR_PTR(-ENODEV); + return -ENODEV; } - if (!shared || (shared && (group != tegra->group))) { -#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU) - if (client->dev->archdata.mapping) { - struct dma_iommu_mapping *mapping = - to_dma_iommu_mapping(client->dev); - arm_iommu_detach_device(client->dev); - arm_iommu_release_mapping(mapping); - } -#endif + if (domain != tegra->domain) { err = iommu_attach_group(tegra->domain, group); if (err < 0) { iommu_group_put(group); - return ERR_PTR(err); + return err; } - - if (shared && !tegra->group) - tegra->group = group; } + + tegra->use_explicit_iommu = true; } - return group; + client->group = group; + + return 0; } -void host1x_client_iommu_detach(struct host1x_client *client, - struct iommu_group *group) +void host1x_client_iommu_detach(struct host1x_client *client) { struct drm_device *drm = dev_get_drvdata(client->parent); struct tegra_drm *tegra = drm->dev_private; + struct iommu_domain *domain; - if (group) { - if (group == tegra->group) { - iommu_detach_group(tegra->domain, group); - tegra->group = NULL; - } + if (client->group) { + /* + * Devices that are part of the same group may no longer be + * attached to a domain at this point because their group may + * have been detached by an earlier client. + */ + domain = iommu_get_domain_for_dev(client->dev); + if (domain) + iommu_detach_group(tegra->domain, client->group); - iommu_group_put(group); + iommu_group_put(client->group); + client->group = NULL; } } @@ -1202,6 +1042,8 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt, static int host1x_drm_probe(struct host1x_device *dev) { struct drm_driver *driver = &tegra_drm_driver; + struct iommu_domain *domain; + struct tegra_drm *tegra; struct drm_device *drm; int err; @@ -1209,18 +1051,180 @@ static int host1x_drm_probe(struct host1x_device *dev) if (IS_ERR(drm)) return PTR_ERR(drm); + tegra = kzalloc(sizeof(*tegra), GFP_KERNEL); + if (!tegra) { + err = -ENOMEM; + goto put; + } + + /* + * If the Tegra DRM clients are backed by an IOMMU, push buffers are + * likely to be allocated beyond the 32-bit boundary if sufficient + * system memory is available. This is problematic on earlier Tegra + * generations where host1x supports a maximum of 32 address bits in + * the GATHER opcode. In this case, unless host1x is behind an IOMMU + * as well it won't be able to process buffers allocated beyond the + * 32-bit boundary. + * + * The DMA API will use bounce buffers in this case, so that could + * perhaps still be made to work, even if less efficient, but there + * is another catch: in order to perform cache maintenance on pages + * allocated for discontiguous buffers we need to map and unmap the + * SG table representing these buffers. This is fine for something + * small like a push buffer, but it exhausts the bounce buffer pool + * (typically on the order of a few MiB) for framebuffers (many MiB + * for any modern resolution). + * + * Work around this by making sure that Tegra DRM clients only use + * an IOMMU if the parent host1x also uses an IOMMU. + * + * Note that there's still a small gap here that we don't cover: if + * the DMA API is backed by an IOMMU there's no way to control which + * device is attached to an IOMMU and which isn't, except via wiring + * up the device tree appropriately. 
This is considered an problem + * of integration, so care must be taken for the DT to be consistent. + */ + domain = iommu_get_domain_for_dev(drm->dev->parent); + + if (domain && iommu_present(&platform_bus_type)) { + tegra->domain = iommu_domain_alloc(&platform_bus_type); + if (!tegra->domain) { + err = -ENOMEM; + goto free; + } + + err = iova_cache_get(); + if (err < 0) + goto domain; + } + + mutex_init(&tegra->clients_lock); + INIT_LIST_HEAD(&tegra->clients); + dev_set_drvdata(&dev->dev, drm); + drm->dev_private = tegra; + tegra->drm = drm; + + drm_mode_config_init(drm); - err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false); + drm->mode_config.min_width = 0; + drm->mode_config.min_height = 0; + + drm->mode_config.max_width = 4096; + drm->mode_config.max_height = 4096; + + drm->mode_config.allow_fb_modifiers = true; + + drm->mode_config.normalize_zpos = true; + + drm->mode_config.funcs = &tegra_drm_mode_config_funcs; + drm->mode_config.helper_private = &tegra_drm_mode_config_helpers; + + err = tegra_drm_fb_prepare(drm); if (err < 0) - goto put; + goto config; + + drm_kms_helper_poll_init(drm); + + err = host1x_device_init(dev); + if (err < 0) + goto fbdev; + + if (tegra->use_explicit_iommu) { + u64 carveout_start, carveout_end, gem_start, gem_end; + u64 dma_mask = dma_get_mask(&dev->dev); + dma_addr_t start, end; + unsigned long order; + + start = tegra->domain->geometry.aperture_start & dma_mask; + end = tegra->domain->geometry.aperture_end & dma_mask; + + gem_start = start; + gem_end = end - CARVEOUT_SZ; + carveout_start = gem_end + 1; + carveout_end = end; + + order = __ffs(tegra->domain->pgsize_bitmap); + init_iova_domain(&tegra->carveout.domain, 1UL << order, + carveout_start >> order); + + tegra->carveout.shift = iova_shift(&tegra->carveout.domain); + tegra->carveout.limit = carveout_end >> tegra->carveout.shift; + + drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1); + mutex_init(&tegra->mm_lock); + + DRM_DEBUG_DRIVER("IOMMU apertures:\n"); + DRM_DEBUG_DRIVER(" GEM: %#llx-%#llx\n", gem_start, gem_end); + DRM_DEBUG_DRIVER(" Carveout: %#llx-%#llx\n", carveout_start, + carveout_end); + } else if (tegra->domain) { + iommu_domain_free(tegra->domain); + tegra->domain = NULL; + iova_cache_put(); + } + + if (tegra->hub) { + err = tegra_display_hub_prepare(tegra->hub); + if (err < 0) + goto device; + } + + /* + * We don't use the drm_irq_install() helpers provided by the DRM + * core, so we need to set this manually in order to allow the + * DRM_IOCTL_WAIT_VBLANK to operate correctly. 
+ */ + drm->irq_enabled = true; + + /* syncpoints are used for full 32-bit hardware VBLANK counters */ + drm->max_vblank_count = 0xffffffff; + + err = drm_vblank_init(drm, drm->mode_config.num_crtc); + if (err < 0) + goto hub; + + drm_mode_config_reset(drm); + + err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", + false); + if (err < 0) + goto hub; + + err = tegra_drm_fb_init(drm); + if (err < 0) + goto hub; err = drm_dev_register(drm, 0); if (err < 0) - goto put; + goto fb; return 0; +fb: + tegra_drm_fb_exit(drm); +hub: + if (tegra->hub) + tegra_display_hub_cleanup(tegra->hub); +device: + if (tegra->domain) { + mutex_destroy(&tegra->mm_lock); + drm_mm_takedown(&tegra->mm); + put_iova_domain(&tegra->carveout.domain); + iova_cache_put(); + } + + host1x_device_exit(dev); +fbdev: + drm_kms_helper_poll_fini(drm); + tegra_drm_fb_free(drm); +config: + drm_mode_config_cleanup(drm); +domain: + if (tegra->domain) + iommu_domain_free(tegra->domain); +free: + kfree(tegra); put: drm_dev_put(drm); return err; @@ -1229,8 +1233,29 @@ put: static int host1x_drm_remove(struct host1x_device *dev) { struct drm_device *drm = dev_get_drvdata(&dev->dev); + struct tegra_drm *tegra = drm->dev_private; + int err; drm_dev_unregister(drm); + + drm_kms_helper_poll_fini(drm); + tegra_drm_fb_exit(drm); + drm_atomic_helper_shutdown(drm); + drm_mode_config_cleanup(drm); + + err = host1x_device_exit(dev); + if (err < 0) + dev_err(&dev->dev, "host1x device cleanup failed: %d\n", err); + + if (tegra->domain) { + mutex_destroy(&tegra->mm_lock); + drm_mm_takedown(&tegra->mm); + put_iova_domain(&tegra->carveout.domain); + iova_cache_put(); + iommu_domain_free(tegra->domain); + } + + kfree(tegra); drm_dev_put(drm); return 0; diff --git a/drivers/gpu/drm/tegra/drm.h b/drivers/gpu/drm/tegra/drm.h index 29911eff9ceb..d941553f7a3d 100644 --- a/drivers/gpu/drm/tegra/drm.h +++ b/drivers/gpu/drm/tegra/drm.h @@ -36,7 +36,7 @@ struct tegra_drm { struct drm_device *drm; struct iommu_domain *domain; - struct iommu_group *group; + bool use_explicit_iommu; struct mutex mm_lock; struct drm_mm mm; @@ -100,10 +100,8 @@ int tegra_drm_register_client(struct tegra_drm *tegra, struct tegra_drm_client *client); int tegra_drm_unregister_client(struct tegra_drm *tegra, struct tegra_drm_client *client); -struct iommu_group *host1x_client_iommu_attach(struct host1x_client *client, - bool shared); -void host1x_client_iommu_detach(struct host1x_client *client, - struct iommu_group *group); +int host1x_client_iommu_attach(struct host1x_client *client); +void host1x_client_iommu_detach(struct host1x_client *client); int tegra_drm_init(struct tegra_drm *tegra, struct drm_device *drm); int tegra_drm_exit(struct tegra_drm *tegra); @@ -155,17 +153,12 @@ void tegra_output_connector_destroy(struct drm_connector *connector); void tegra_output_encoder_destroy(struct drm_encoder *encoder); /* from dpaux.c */ -struct drm_dp_link; - struct drm_dp_aux *drm_dp_aux_find_by_of_node(struct device_node *np); enum drm_connector_status drm_dp_aux_detect(struct drm_dp_aux *aux); int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output); int drm_dp_aux_detach(struct drm_dp_aux *aux); int drm_dp_aux_enable(struct drm_dp_aux *aux); int drm_dp_aux_disable(struct drm_dp_aux *aux); -int drm_dp_aux_prepare(struct drm_dp_aux *aux, u8 encoding); -int drm_dp_aux_train(struct drm_dp_aux *aux, struct drm_dp_link *link, - u8 pattern); /* from fb.c */ struct tegra_bo *tegra_fb_get_plane(struct drm_framebuffer *framebuffer, diff --git 
a/drivers/gpu/drm/tegra/falcon.c b/drivers/gpu/drm/tegra/falcon.c index f49ad36e24db..56edef06c48e 100644 --- a/drivers/gpu/drm/tegra/falcon.c +++ b/drivers/gpu/drm/tegra/falcon.c @@ -58,32 +58,17 @@ static int falcon_copy_chunk(struct falcon *falcon, static void falcon_copy_firmware_image(struct falcon *falcon, const struct firmware *firmware) { - u32 *firmware_vaddr = falcon->firmware.vaddr; - dma_addr_t daddr; + u32 *virt = falcon->firmware.virt; size_t i; - int err; /* copy the whole thing taking into account endianness */ for (i = 0; i < firmware->size / sizeof(u32); i++) - firmware_vaddr[i] = le32_to_cpu(((u32 *)firmware->data)[i]); - - /* ensure that caches are flushed and falcon can see the firmware */ - daddr = dma_map_single(falcon->dev, firmware_vaddr, - falcon->firmware.size, DMA_TO_DEVICE); - err = dma_mapping_error(falcon->dev, daddr); - if (err) { - dev_err(falcon->dev, "failed to map firmware: %d\n", err); - return; - } - dma_sync_single_for_device(falcon->dev, daddr, - falcon->firmware.size, DMA_TO_DEVICE); - dma_unmap_single(falcon->dev, daddr, falcon->firmware.size, - DMA_TO_DEVICE); + virt[i] = le32_to_cpu(((u32 *)firmware->data)[i]); } static int falcon_parse_firmware_image(struct falcon *falcon) { - struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.vaddr; + struct falcon_fw_bin_header_v1 *bin = (void *)falcon->firmware.virt; struct falcon_fw_os_header_v1 *os; /* endian problems would show up right here */ @@ -104,7 +89,7 @@ static int falcon_parse_firmware_image(struct falcon *falcon) return -EINVAL; } - os = falcon->firmware.vaddr + bin->os_header_offset; + os = falcon->firmware.virt + bin->os_header_offset; falcon->firmware.bin_data.size = bin->os_size; falcon->firmware.bin_data.offset = bin->os_data_offset; @@ -125,6 +110,8 @@ int falcon_read_firmware(struct falcon *falcon, const char *name) if (err < 0) return err; + falcon->firmware.size = falcon->firmware.firmware->size; + return 0; } @@ -133,16 +120,6 @@ int falcon_load_firmware(struct falcon *falcon) const struct firmware *firmware = falcon->firmware.firmware; int err; - falcon->firmware.size = firmware->size; - - /* allocate iova space for the firmware */ - falcon->firmware.vaddr = falcon->ops->alloc(falcon, firmware->size, - &falcon->firmware.paddr); - if (IS_ERR(falcon->firmware.vaddr)) { - dev_err(falcon->dev, "DMA memory mapping failed\n"); - return PTR_ERR(falcon->firmware.vaddr); - } - /* copy firmware image into local area. 
this also ensures endianness */ falcon_copy_firmware_image(falcon, firmware); @@ -150,45 +127,26 @@ int falcon_load_firmware(struct falcon *falcon) err = falcon_parse_firmware_image(falcon); if (err < 0) { dev_err(falcon->dev, "failed to parse firmware image\n"); - goto err_setup_firmware_image; + return err; } release_firmware(firmware); falcon->firmware.firmware = NULL; return 0; - -err_setup_firmware_image: - falcon->ops->free(falcon, falcon->firmware.size, - falcon->firmware.paddr, falcon->firmware.vaddr); - - return err; } int falcon_init(struct falcon *falcon) { - /* check mandatory ops */ - if (!falcon->ops || !falcon->ops->alloc || !falcon->ops->free) - return -EINVAL; - - falcon->firmware.vaddr = NULL; + falcon->firmware.virt = NULL; return 0; } void falcon_exit(struct falcon *falcon) { - if (falcon->firmware.firmware) { + if (falcon->firmware.firmware) release_firmware(falcon->firmware.firmware); - falcon->firmware.firmware = NULL; - } - - if (falcon->firmware.vaddr) { - falcon->ops->free(falcon, falcon->firmware.size, - falcon->firmware.paddr, - falcon->firmware.vaddr); - falcon->firmware.vaddr = NULL; - } } int falcon_boot(struct falcon *falcon) @@ -197,7 +155,7 @@ int falcon_boot(struct falcon *falcon) u32 value; int err; - if (!falcon->firmware.vaddr) + if (!falcon->firmware.virt) return -EINVAL; err = readl_poll_timeout(falcon->regs + FALCON_DMACTL, value, @@ -210,7 +168,7 @@ int falcon_boot(struct falcon *falcon) falcon_writel(falcon, 0, FALCON_DMACTL); /* setup the address of the binary data so Falcon can access it later */ - falcon_writel(falcon, (falcon->firmware.paddr + + falcon_writel(falcon, (falcon->firmware.iova + falcon->firmware.bin_data.offset) >> 8, FALCON_DMATRFBASE); diff --git a/drivers/gpu/drm/tegra/falcon.h b/drivers/gpu/drm/tegra/falcon.h index 3d1243217410..c56ee32d92ee 100644 --- a/drivers/gpu/drm/tegra/falcon.h +++ b/drivers/gpu/drm/tegra/falcon.h @@ -74,15 +74,6 @@ struct falcon_fw_os_header_v1 { u32 data_size; }; -struct falcon; - -struct falcon_ops { - void *(*alloc)(struct falcon *falcon, size_t size, - dma_addr_t *paddr); - void (*free)(struct falcon *falcon, size_t size, - dma_addr_t paddr, void *vaddr); -}; - struct falcon_firmware_section { unsigned long offset; size_t size; @@ -93,8 +84,9 @@ struct falcon_firmware { const struct firmware *firmware; /* Raw firmware data */ - dma_addr_t paddr; - void *vaddr; + dma_addr_t iova; + dma_addr_t phys; + void *virt; size_t size; /* Parsed firmware information */ @@ -107,8 +99,6 @@ struct falcon { /* Set by falcon client */ struct device *dev; void __iomem *regs; - const struct falcon_ops *ops; - void *data; struct falcon_firmware firmware; }; diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c index e34325c83d28..7cea89f29a5c 100644 --- a/drivers/gpu/drm/tegra/fb.c +++ b/drivers/gpu/drm/tegra/fb.c @@ -269,10 +269,10 @@ static int tegra_fbdev_probe(struct drm_fb_helper *helper, } } - drm->mode_config.fb_base = (resource_size_t)bo->paddr; + drm->mode_config.fb_base = (resource_size_t)bo->iova; info->screen_base = (void __iomem *)bo->vaddr + offset; info->screen_size = size; - info->fix.smem_start = (unsigned long)(bo->paddr + offset); + info->fix.smem_start = (unsigned long)(bo->iova + offset); info->fix.smem_len = size; return 0; diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c index fb7667c8dd4c..746dae32c484 100644 --- a/drivers/gpu/drm/tegra/gem.c +++ b/drivers/gpu/drm/tegra/gem.c @@ -27,17 +27,55 @@ static void tegra_bo_put(struct host1x_bo *bo) 
drm_gem_object_put_unlocked(&obj->gem); } -static dma_addr_t tegra_bo_pin(struct host1x_bo *bo, struct sg_table **sgt) +static struct sg_table *tegra_bo_pin(struct device *dev, struct host1x_bo *bo, + dma_addr_t *phys) { struct tegra_bo *obj = host1x_to_tegra_bo(bo); + struct sg_table *sgt; + int err; + + /* + * If we've manually mapped the buffer object through the IOMMU, make + * sure to return the IOVA address of our mapping. + */ + if (phys && obj->mm) { + *phys = obj->iova; + return NULL; + } + + /* + * If we don't have a mapping for this buffer yet, return an SG table + * so that host1x can do the mapping for us via the DMA API. + */ + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); - *sgt = obj->sgt; + if (obj->pages) { + err = sg_alloc_table_from_pages(sgt, obj->pages, obj->num_pages, + 0, obj->gem.size, GFP_KERNEL); + if (err < 0) + goto free; + } else { + err = dma_get_sgtable(dev, sgt, obj->vaddr, obj->iova, + obj->gem.size); + if (err < 0) + goto free; + } - return obj->paddr; + return sgt; + +free: + kfree(sgt); + return ERR_PTR(err); } -static void tegra_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt) +static void tegra_bo_unpin(struct device *dev, struct sg_table *sgt) { + if (sgt) { + sg_free_table(sgt); + kfree(sgt); + } } static void *tegra_bo_mmap(struct host1x_bo *bo) @@ -133,9 +171,9 @@ static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo) goto unlock; } - bo->paddr = bo->mm->start; + bo->iova = bo->mm->start; - bo->size = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl, + bo->size = iommu_map_sg(tegra->domain, bo->iova, bo->sgt->sgl, bo->sgt->nents, prot); if (!bo->size) { dev_err(tegra->drm->dev, "failed to map buffer\n"); @@ -161,7 +199,7 @@ static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo) return 0; mutex_lock(&tegra->mm_lock); - iommu_unmap(tegra->domain, bo->paddr, bo->size); + iommu_unmap(tegra->domain, bo->iova, bo->size); drm_mm_remove_node(bo->mm); mutex_unlock(&tegra->mm_lock); @@ -209,7 +247,7 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo) sg_free_table(bo->sgt); kfree(bo->sgt); } else if (bo->vaddr) { - dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->paddr); + dma_free_wc(drm->dev, bo->gem.size, bo->vaddr, bo->iova); } } @@ -264,7 +302,7 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo) } else { size_t size = bo->gem.size; - bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->paddr, + bo->vaddr = dma_alloc_wc(drm->dev, size, &bo->iova, GFP_KERNEL | __GFP_NOWARN); if (!bo->vaddr) { dev_err(drm->dev, @@ -365,7 +403,7 @@ static struct tegra_bo *tegra_bo_import(struct drm_device *drm, goto detach; } - bo->paddr = sg_dma_address(bo->sgt->sgl); + bo->iova = sg_dma_address(bo->sgt->sgl); } bo->gem.import_attach = attach; @@ -461,7 +499,7 @@ int __tegra_gem_mmap(struct drm_gem_object *gem, struct vm_area_struct *vma) vma->vm_flags &= ~VM_PFNMAP; vma->vm_pgoff = 0; - err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->paddr, + err = dma_mmap_wc(gem->dev->dev, vma, bo->vaddr, bo->iova, gem->size); if (err < 0) { drm_gem_vm_close(vma); @@ -508,25 +546,18 @@ tegra_gem_prime_map_dma_buf(struct dma_buf_attachment *attach, return NULL; if (bo->pages) { - struct scatterlist *sg; - unsigned int i; - - if (sg_alloc_table(sgt, bo->num_pages, GFP_KERNEL)) - goto free; - - for_each_sg(sgt->sgl, sg, bo->num_pages, i) - sg_set_page(sg, bo->pages[i], PAGE_SIZE, 0); - - if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + if 
(sg_alloc_table_from_pages(sgt, bo->pages, bo->num_pages, + 0, gem->size, GFP_KERNEL) < 0) goto free; } else { - if (sg_alloc_table(sgt, 1, GFP_KERNEL)) + if (dma_get_sgtable(attach->dev, sgt, bo->vaddr, bo->iova, + gem->size) < 0) goto free; - - sg_dma_address(sgt->sgl) = bo->paddr; - sg_dma_len(sgt->sgl) = gem->size; } + if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) + goto free; + return sgt; free: diff --git a/drivers/gpu/drm/tegra/gem.h b/drivers/gpu/drm/tegra/gem.h index 83ffb1e14ca3..fafb5724499b 100644 --- a/drivers/gpu/drm/tegra/gem.h +++ b/drivers/gpu/drm/tegra/gem.h @@ -31,7 +31,7 @@ struct tegra_bo { struct host1x_bo base; unsigned long flags; struct sg_table *sgt; - dma_addr_t paddr; + dma_addr_t iova; void *vaddr; struct drm_mm_node *mm; diff --git a/drivers/gpu/drm/tegra/gr2d.c b/drivers/gpu/drm/tegra/gr2d.c index 641299cc85b8..1fc4e56c7cc5 100644 --- a/drivers/gpu/drm/tegra/gr2d.c +++ b/drivers/gpu/drm/tegra/gr2d.c @@ -17,7 +17,6 @@ struct gr2d_soc { }; struct gr2d { - struct iommu_group *group; struct tegra_drm_client client; struct host1x_channel *channel; struct clk *clk; @@ -40,7 +39,7 @@ static int gr2d_init(struct host1x_client *client) struct gr2d *gr2d = to_gr2d(drm); int err; - gr2d->channel = host1x_channel_request(client->dev); + gr2d->channel = host1x_channel_request(client); if (!gr2d->channel) return -ENOMEM; @@ -51,9 +50,8 @@ static int gr2d_init(struct host1x_client *client) goto put; } - gr2d->group = host1x_client_iommu_attach(client, false); - if (IS_ERR(gr2d->group)) { - err = PTR_ERR(gr2d->group); + err = host1x_client_iommu_attach(client); + if (err < 0) { dev_err(client->dev, "failed to attach to domain: %d\n", err); goto free; } @@ -67,7 +65,7 @@ static int gr2d_init(struct host1x_client *client) return 0; detach: - host1x_client_iommu_detach(client, gr2d->group); + host1x_client_iommu_detach(client); free: host1x_syncpt_free(client->syncpts[0]); put: @@ -87,7 +85,7 @@ static int gr2d_exit(struct host1x_client *client) if (err < 0) return err; - host1x_client_iommu_detach(client, gr2d->group); + host1x_client_iommu_detach(client); host1x_syncpt_free(client->syncpts[0]); host1x_channel_put(gr2d->channel); diff --git a/drivers/gpu/drm/tegra/gr3d.c b/drivers/gpu/drm/tegra/gr3d.c index 8b9a35b1cbb3..24fae0f64032 100644 --- a/drivers/gpu/drm/tegra/gr3d.c +++ b/drivers/gpu/drm/tegra/gr3d.c @@ -23,7 +23,6 @@ struct gr3d_soc { }; struct gr3d { - struct iommu_group *group; struct tegra_drm_client client; struct host1x_channel *channel; struct clk *clk_secondary; @@ -49,7 +48,7 @@ static int gr3d_init(struct host1x_client *client) struct gr3d *gr3d = to_gr3d(drm); int err; - gr3d->channel = host1x_channel_request(client->dev); + gr3d->channel = host1x_channel_request(client); if (!gr3d->channel) return -ENOMEM; @@ -60,9 +59,8 @@ static int gr3d_init(struct host1x_client *client) goto put; } - gr3d->group = host1x_client_iommu_attach(client, false); - if (IS_ERR(gr3d->group)) { - err = PTR_ERR(gr3d->group); + err = host1x_client_iommu_attach(client); + if (err < 0) { dev_err(client->dev, "failed to attach to domain: %d\n", err); goto free; } @@ -76,7 +74,7 @@ static int gr3d_init(struct host1x_client *client) return 0; detach: - host1x_client_iommu_detach(client, gr3d->group); + host1x_client_iommu_detach(client); free: host1x_syncpt_free(client->syncpts[0]); put: @@ -95,7 +93,7 @@ static int gr3d_exit(struct host1x_client *client) if (err < 0) return err; - host1x_client_iommu_detach(client, gr3d->group); + host1x_client_iommu_detach(client); 
host1x_syncpt_free(client->syncpts[0]); host1x_channel_put(gr3d->channel); diff --git a/drivers/gpu/drm/tegra/hub.c b/drivers/gpu/drm/tegra/hub.c index 839b49c40e51..2b4082d0bc9e 100644 --- a/drivers/gpu/drm/tegra/hub.c +++ b/drivers/gpu/drm/tegra/hub.c @@ -413,7 +413,6 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, unsigned int zpos = plane->state->normalized_zpos; struct drm_framebuffer *fb = plane->state->fb; struct tegra_plane *p = to_tegra_plane(plane); - struct tegra_bo *bo; dma_addr_t base; u32 value; @@ -456,8 +455,7 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, /* disable compression */ tegra_plane_writel(p, 0, DC_WINBUF_CDE_CONTROL); - bo = tegra_fb_get_plane(fb, 0); - base = bo->paddr; + base = state->iova[0] + fb->offsets[0]; tegra_plane_writel(p, state->format, DC_WIN_COLOR_DEPTH); tegra_plane_writel(p, 0, DC_WIN_PRECOMP_WGRP_PARAMS); @@ -521,6 +519,8 @@ static void tegra_shared_plane_atomic_update(struct drm_plane *plane, } static const struct drm_plane_helper_funcs tegra_shared_plane_helper_funcs = { + .prepare_fb = tegra_plane_prepare_fb, + .cleanup_fb = tegra_plane_cleanup_fb, .atomic_check = tegra_shared_plane_atomic_check, .atomic_update = tegra_shared_plane_atomic_update, .atomic_disable = tegra_shared_plane_atomic_disable, diff --git a/drivers/gpu/drm/tegra/output.c b/drivers/gpu/drm/tegra/output.c index bdcaa4c7168c..34373734ff68 100644 --- a/drivers/gpu/drm/tegra/output.c +++ b/drivers/gpu/drm/tegra/output.c @@ -70,6 +70,11 @@ tegra_output_connector_detect(struct drm_connector *connector, bool force) void tegra_output_connector_destroy(struct drm_connector *connector) { + struct tegra_output *output = connector_to_output(connector); + + if (output->cec) + cec_notifier_conn_unregister(output->cec); + drm_connector_unregister(connector); drm_connector_cleanup(connector); } @@ -163,18 +168,11 @@ int tegra_output_probe(struct tegra_output *output) disable_irq(output->hpd_irq); } - output->cec = cec_notifier_get(output->dev); - if (!output->cec) - return -ENOMEM; - return 0; } void tegra_output_remove(struct tegra_output *output) { - if (output->cec) - cec_notifier_put(output->cec); - if (output->hpd_gpio) free_irq(output->hpd_irq, output); @@ -184,6 +182,7 @@ void tegra_output_remove(struct tegra_output *output) int tegra_output_init(struct drm_device *drm, struct tegra_output *output) { + int connector_type; int err; if (output->panel) { @@ -199,6 +198,21 @@ int tegra_output_init(struct drm_device *drm, struct tegra_output *output) if (output->hpd_gpio) enable_irq(output->hpd_irq); + connector_type = output->connector.connector_type; + /* + * Create a CEC notifier for HDMI connector. 
+ */ + if (connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector_type == DRM_MODE_CONNECTOR_HDMIB) { + struct cec_connector_info conn_info; + + cec_fill_conn_info_from_drm(&conn_info, &output->connector); + output->cec = cec_notifier_conn_register(output->dev, NULL, + &conn_info); + if (!output->cec) + return -ENOMEM; + } + return 0; } diff --git a/drivers/gpu/drm/tegra/plane.c b/drivers/gpu/drm/tegra/plane.c index 6bab71d6e81d..163b590be224 100644 --- a/drivers/gpu/drm/tegra/plane.c +++ b/drivers/gpu/drm/tegra/plane.c @@ -6,6 +6,7 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include "dc.h" @@ -23,6 +24,7 @@ static void tegra_plane_reset(struct drm_plane *plane) { struct tegra_plane *p = to_tegra_plane(plane); struct tegra_plane_state *state; + unsigned int i; if (plane->state) __drm_atomic_helper_plane_destroy_state(plane->state); @@ -36,6 +38,9 @@ static void tegra_plane_reset(struct drm_plane *plane) plane->state->plane = plane; plane->state->zpos = p->index; plane->state->normalized_zpos = p->index; + + for (i = 0; i < 3; i++) + state->iova[i] = DMA_MAPPING_ERROR; } } @@ -60,6 +65,11 @@ tegra_plane_atomic_duplicate_state(struct drm_plane *plane) for (i = 0; i < 2; i++) copy->blending[i] = state->blending[i]; + for (i = 0; i < 3; i++) { + copy->iova[i] = DMA_MAPPING_ERROR; + copy->sgt[i] = NULL; + } + return ©->base; } @@ -95,6 +105,100 @@ const struct drm_plane_funcs tegra_plane_funcs = { .format_mod_supported = tegra_plane_format_mod_supported, }; +static int tegra_dc_pin(struct tegra_dc *dc, struct tegra_plane_state *state) +{ + unsigned int i; + int err; + + for (i = 0; i < state->base.fb->format->num_planes; i++) { + struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + + if (!dc->client.group) { + struct sg_table *sgt; + + sgt = host1x_bo_pin(dc->dev, &bo->base, NULL); + if (IS_ERR(sgt)) { + err = PTR_ERR(sgt); + goto unpin; + } + + err = dma_map_sg(dc->dev, sgt->sgl, sgt->nents, + DMA_TO_DEVICE); + if (err == 0) { + err = -ENOMEM; + goto unpin; + } + + state->iova[i] = sg_dma_address(sgt->sgl); + state->sgt[i] = sgt; + } else { + state->iova[i] = bo->iova; + } + } + + return 0; + +unpin: + dev_err(dc->dev, "failed to map plane %u: %d\n", i, err); + + while (i--) { + struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + struct sg_table *sgt = state->sgt[i]; + + dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE); + host1x_bo_unpin(dc->dev, &bo->base, sgt); + + state->iova[i] = DMA_MAPPING_ERROR; + state->sgt[i] = NULL; + } + + return err; +} + +static void tegra_dc_unpin(struct tegra_dc *dc, struct tegra_plane_state *state) +{ + unsigned int i; + + for (i = 0; i < state->base.fb->format->num_planes; i++) { + struct tegra_bo *bo = tegra_fb_get_plane(state->base.fb, i); + + if (!dc->client.group) { + struct sg_table *sgt = state->sgt[i]; + + if (sgt) { + dma_unmap_sg(dc->dev, sgt->sgl, sgt->nents, + DMA_TO_DEVICE); + host1x_bo_unpin(dc->dev, &bo->base, sgt); + } + } + + state->iova[i] = DMA_MAPPING_ERROR; + state->sgt[i] = NULL; + } +} + +int tegra_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct tegra_dc *dc = to_tegra_dc(state->crtc); + + if (!state->fb) + return 0; + + drm_gem_fb_prepare_fb(plane, state); + + return tegra_dc_pin(dc, to_tegra_plane_state(state)); +} + +void tegra_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct tegra_dc *dc = 
to_tegra_dc(state->crtc); + + if (dc) + tegra_dc_unpin(dc, to_tegra_plane_state(state)); +} + int tegra_plane_state_add(struct tegra_plane *plane, struct drm_plane_state *state) { diff --git a/drivers/gpu/drm/tegra/plane.h b/drivers/gpu/drm/tegra/plane.h index 510c394e6d9a..a158a915109a 100644 --- a/drivers/gpu/drm/tegra/plane.h +++ b/drivers/gpu/drm/tegra/plane.h @@ -39,6 +39,9 @@ struct tegra_plane_legacy_blending_state { struct tegra_plane_state { struct drm_plane_state base; + struct sg_table *sgt[3]; + dma_addr_t iova[3]; + struct tegra_bo_tiling tiling; u32 format; u32 swap; @@ -61,6 +64,11 @@ to_tegra_plane_state(struct drm_plane_state *state) extern const struct drm_plane_funcs tegra_plane_funcs; +int tegra_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *state); +void tegra_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *state); + int tegra_plane_state_add(struct tegra_plane *plane, struct drm_plane_state *state); diff --git a/drivers/gpu/drm/tegra/sor.c b/drivers/gpu/drm/tegra/sor.c index e1669ada0a40..615cb319fa8b 100644 --- a/drivers/gpu/drm/tegra/sor.c +++ b/drivers/gpu/drm/tegra/sor.c @@ -25,6 +25,7 @@ #include <drm/drm_scdc_helper.h> #include "dc.h" +#include "dp.h" #include "drm.h" #include "hda.h" #include "sor.h" @@ -370,10 +371,11 @@ struct tegra_sor_regs { }; struct tegra_sor_soc { - bool supports_edp; bool supports_lvds; bool supports_hdmi; bool supports_dp; + bool supports_audio; + bool supports_hdcp; const struct tegra_sor_regs *regs; bool has_nvdisplay; @@ -382,6 +384,12 @@ struct tegra_sor_soc { unsigned int num_settings; const u8 *xbar_cfg; + const u8 *lane_map; + + const u8 (*voltage_swing)[4][4]; + const u8 (*pre_emphasis)[4][4]; + const u8 (*post_cursor)[4][4]; + const u8 (*tx_pu)[4][4]; }; struct tegra_sor; @@ -390,6 +398,8 @@ struct tegra_sor_ops { const char *name; int (*probe)(struct tegra_sor *sor); int (*remove)(struct tegra_sor *sor); + void (*audio_enable)(struct tegra_sor *sor); + void (*audio_disable)(struct tegra_sor *sor); }; struct tegra_sor { @@ -412,6 +422,7 @@ struct tegra_sor { u8 xbar_cfg[5]; + struct drm_dp_link link; struct drm_dp_aux *aux; struct drm_info_list *debugfs_files; @@ -514,10 +525,19 @@ static inline struct tegra_clk_sor_pad *to_pad(struct clk_hw *hw) return container_of(hw, struct tegra_clk_sor_pad, hw); } -static const char * const tegra_clk_sor_pad_parents[] = { - "pll_d2_out0", "pll_dp" +static const char * const tegra_clk_sor_pad_parents[2][2] = { + { "pll_d_out0", "pll_dp" }, + { "pll_d2_out0", "pll_dp" }, }; +/* + * Implementing ->set_parent() here isn't really required because the parent + * will be explicitly selected in the driver code via the DP_CLK_SEL mux in + * the SOR_CLK_CNTRL register. This is primarily for compatibility with the + * Tegra186 and later SoC generations where the BPMP implements this clock + * and doesn't expose the mux via the common clock framework. 
+ */ + static int tegra_clk_sor_pad_set_parent(struct clk_hw *hw, u8 index) { struct tegra_clk_sor_pad *pad = to_pad(hw); @@ -586,8 +606,8 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor, init.name = name; init.flags = 0; - init.parent_names = tegra_clk_sor_pad_parents; - init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents); + init.parent_names = tegra_clk_sor_pad_parents[sor->index]; + init.num_parents = ARRAY_SIZE(tegra_clk_sor_pad_parents[sor->index]); init.ops = &tegra_clk_sor_pad_ops; pad->hw.init = &init; @@ -597,112 +617,340 @@ static struct clk *tegra_clk_sor_pad_register(struct tegra_sor *sor, return clk; } -static int tegra_sor_dp_train_fast(struct tegra_sor *sor, - struct drm_dp_link *link) +static void tegra_sor_filter_rates(struct tegra_sor *sor) { + struct drm_dp_link *link = &sor->link; unsigned int i; - u8 pattern; + + /* Tegra only supports RBR, HBR and HBR2 */ + for (i = 0; i < link->num_rates; i++) { + switch (link->rates[i]) { + case 1620000: + case 2700000: + case 5400000: + break; + + default: + DRM_DEBUG_KMS("link rate %lu kHz not supported\n", + link->rates[i]); + link->rates[i] = 0; + break; + } + } + + drm_dp_link_update_rates(link); +} + +static int tegra_sor_power_up_lanes(struct tegra_sor *sor, unsigned int lanes) +{ + unsigned long timeout; u32 value; - int err; - /* setup lane parameters */ - value = SOR_LANE_DRIVE_CURRENT_LANE3(0x40) | - SOR_LANE_DRIVE_CURRENT_LANE2(0x40) | - SOR_LANE_DRIVE_CURRENT_LANE1(0x40) | - SOR_LANE_DRIVE_CURRENT_LANE0(0x40); - tegra_sor_writel(sor, value, SOR_LANE_DRIVE_CURRENT0); + /* + * Clear or set the PD_TXD bit corresponding to each lane, depending + * on whether it is used or not. + */ + value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value = SOR_LANE_PREEMPHASIS_LANE3(0x0f) | - SOR_LANE_PREEMPHASIS_LANE2(0x0f) | - SOR_LANE_PREEMPHASIS_LANE1(0x0f) | - SOR_LANE_PREEMPHASIS_LANE0(0x0f); - tegra_sor_writel(sor, value, SOR_LANE_PREEMPHASIS0); + if (lanes <= 2) + value &= ~(SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) | + SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2])); + else + value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[3]) | + SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[2]); - value = SOR_LANE_POSTCURSOR_LANE3(0x00) | - SOR_LANE_POSTCURSOR_LANE2(0x00) | - SOR_LANE_POSTCURSOR_LANE1(0x00) | - SOR_LANE_POSTCURSOR_LANE0(0x00); - tegra_sor_writel(sor, value, SOR_LANE_POSTCURSOR0); + if (lanes <= 1) + value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]); + else + value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[1]); - /* disable LVDS mode */ - tegra_sor_writel(sor, 0, SOR_LVDS); + if (lanes == 0) + value &= ~SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]); + else + value |= SOR_DP_PADCTL_PD_TXD(sor->soc->lane_map[0]); + + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); + + /* start lane sequencer */ + value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN | + SOR_LANE_SEQ_CTL_POWER_STATE_UP; + tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); + + timeout = jiffies + msecs_to_jiffies(250); + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) + break; + + usleep_range(250, 1000); + } + + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) + return -ETIMEDOUT; + + return 0; +} + +static int tegra_sor_power_down_lanes(struct tegra_sor *sor) +{ + unsigned long timeout; + u32 value; + + /* power down all lanes */ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value |= SOR_DP_PADCTL_TX_PU_ENABLE; - 
value &= ~SOR_DP_PADCTL_TX_PU_MASK; - value |= SOR_DP_PADCTL_TX_PU(2); /* XXX: don't hardcode? */ + value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | + SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2); tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); + /* start lane sequencer */ + value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP | + SOR_LANE_SEQ_CTL_POWER_STATE_DOWN; + tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); + + timeout = jiffies + msecs_to_jiffies(250); + + while (time_before(jiffies, timeout)) { + value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) + break; + + usleep_range(25, 100); + } + + if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) + return -ETIMEDOUT; + + return 0; +} + +static void tegra_sor_dp_precharge(struct tegra_sor *sor, unsigned int lanes) +{ + u32 value; + + /* pre-charge all used lanes */ value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value |= SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 | - SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0; + + if (lanes <= 2) + value &= ~(SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) | + SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2])); + else + value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[3]) | + SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[2]); + + if (lanes <= 1) + value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]); + else + value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[1]); + + if (lanes == 0) + value &= ~SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]); + else + value |= SOR_DP_PADCTL_CM_TXD(sor->soc->lane_map[0]); + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - usleep_range(10, 100); + usleep_range(15, 100); value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); value &= ~(SOR_DP_PADCTL_CM_TXD_3 | SOR_DP_PADCTL_CM_TXD_2 | SOR_DP_PADCTL_CM_TXD_1 | SOR_DP_PADCTL_CM_TXD_0); tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); +} - err = drm_dp_aux_prepare(sor->aux, DP_SET_ANSI_8B10B); - if (err < 0) - return err; +static void tegra_sor_dp_term_calibrate(struct tegra_sor *sor) +{ + u32 mask = 0x08, adj = 0, value; + + /* enable pad calibration logic */ + value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); + value &= ~SOR_DP_PADCTL_PAD_CAL_PD; + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); + + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + value |= SOR_PLL1_TMDS_TERM; + tegra_sor_writel(sor, value, sor->soc->regs->pll1); + + while (mask) { + adj |= mask; + + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + value &= ~SOR_PLL1_TMDS_TERMADJ_MASK; + value |= SOR_PLL1_TMDS_TERMADJ(adj); + tegra_sor_writel(sor, value, sor->soc->regs->pll1); - for (i = 0, value = 0; i < link->num_lanes; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_NONE | - SOR_DP_TPG_PATTERN_TRAIN1; - value = (value << 8) | lane; + usleep_range(100, 200); + + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + if (value & SOR_PLL1_TERM_COMPOUT) + adj &= ~mask; + + mask >>= 1; } - tegra_sor_writel(sor, value, SOR_DP_TPG); + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + value &= ~SOR_PLL1_TMDS_TERMADJ_MASK; + value |= SOR_PLL1_TMDS_TERMADJ(adj); + tegra_sor_writel(sor, value, sor->soc->regs->pll1); - pattern = DP_TRAINING_PATTERN_1; + /* disable pad calibration logic */ + value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); + value |= SOR_DP_PADCTL_PAD_CAL_PD; + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); +} - err = drm_dp_aux_train(sor->aux, link, pattern); - if (err < 
0) - return err; +static int tegra_sor_dp_link_apply_training(struct drm_dp_link *link) +{ + struct tegra_sor *sor = container_of(link, struct tegra_sor, link); + u32 voltage_swing = 0, pre_emphasis = 0, post_cursor = 0; + const struct tegra_sor_soc *soc = sor->soc; + u32 pattern = 0, tx_pu = 0, value; + unsigned int i; - value = tegra_sor_readl(sor, SOR_DP_SPARE0); - value |= SOR_DP_SPARE_SEQ_ENABLE; - value &= ~SOR_DP_SPARE_PANEL_INTERNAL; - value |= SOR_DP_SPARE_MACRO_SOR_CLK; - tegra_sor_writel(sor, value, SOR_DP_SPARE0); + for (value = 0, i = 0; i < link->lanes; i++) { + u8 vs = link->train.request.voltage_swing[i]; + u8 pe = link->train.request.pre_emphasis[i]; + u8 pc = link->train.request.post_cursor[i]; + u8 shift = sor->soc->lane_map[i] << 3; + + voltage_swing |= soc->voltage_swing[pc][vs][pe] << shift; + pre_emphasis |= soc->pre_emphasis[pc][vs][pe] << shift; + post_cursor |= soc->post_cursor[pc][vs][pe] << shift; + + if (sor->soc->tx_pu[pc][vs][pe] > tx_pu) + tx_pu = sor->soc->tx_pu[pc][vs][pe]; + + switch (link->train.pattern) { + case DP_TRAINING_PATTERN_DISABLE: + value = SOR_DP_TPG_SCRAMBLER_GALIOS | + SOR_DP_TPG_PATTERN_NONE; + break; + + case DP_TRAINING_PATTERN_1: + value = SOR_DP_TPG_SCRAMBLER_NONE | + SOR_DP_TPG_PATTERN_TRAIN1; + break; + + case DP_TRAINING_PATTERN_2: + value = SOR_DP_TPG_SCRAMBLER_NONE | + SOR_DP_TPG_PATTERN_TRAIN2; + break; + + case DP_TRAINING_PATTERN_3: + value = SOR_DP_TPG_SCRAMBLER_NONE | + SOR_DP_TPG_PATTERN_TRAIN3; + break; + + default: + return -EINVAL; + } + + if (link->caps.channel_coding) + value |= SOR_DP_TPG_CHANNEL_CODING; - for (i = 0, value = 0; i < link->num_lanes; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_NONE | - SOR_DP_TPG_PATTERN_TRAIN2; - value = (value << 8) | lane; + pattern = pattern << 8 | value; } - tegra_sor_writel(sor, value, SOR_DP_TPG); + tegra_sor_writel(sor, voltage_swing, SOR_LANE_DRIVE_CURRENT0); + tegra_sor_writel(sor, pre_emphasis, SOR_LANE_PREEMPHASIS0); - pattern = DP_LINK_SCRAMBLING_DISABLE | DP_TRAINING_PATTERN_2; + if (link->caps.tps3_supported) + tegra_sor_writel(sor, post_cursor, SOR_LANE_POSTCURSOR0); - err = drm_dp_aux_train(sor->aux, link, pattern); - if (err < 0) - return err; + tegra_sor_writel(sor, pattern, SOR_DP_TPG); - for (i = 0, value = 0; i < link->num_lanes; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_GALIOS | - SOR_DP_TPG_PATTERN_NONE; - value = (value << 8) | lane; + value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); + value &= ~SOR_DP_PADCTL_TX_PU_MASK; + value |= SOR_DP_PADCTL_TX_PU_ENABLE; + value |= SOR_DP_PADCTL_TX_PU(tx_pu); + tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); + + usleep_range(20, 100); + + return 0; +} + +static int tegra_sor_dp_link_configure(struct drm_dp_link *link) +{ + struct tegra_sor *sor = container_of(link, struct tegra_sor, link); + unsigned int rate, lanes; + u32 value; + int err; + + rate = drm_dp_link_rate_to_bw_code(link->rate); + lanes = link->lanes; + + /* configure link speed and lane count */ + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; + value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate); + tegra_sor_writel(sor, value, SOR_CLK_CNTRL); + + value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); + value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; + value |= SOR_DP_LINKCTL_LANE_COUNT(lanes); + + if (link->caps.enhanced_framing) + value |= SOR_DP_LINKCTL_ENHANCED_FRAME; + + tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); + + usleep_range(400, 
1000); + + /* configure load pulse position adjustment */ + value = tegra_sor_readl(sor, sor->soc->regs->pll1); + value &= ~SOR_PLL1_LOADADJ_MASK; + + switch (rate) { + case DP_LINK_BW_1_62: + value |= SOR_PLL1_LOADADJ(0x3); + break; + + case DP_LINK_BW_2_7: + value |= SOR_PLL1_LOADADJ(0x4); + break; + + case DP_LINK_BW_5_4: + value |= SOR_PLL1_LOADADJ(0x6); + break; } - tegra_sor_writel(sor, value, SOR_DP_TPG); + tegra_sor_writel(sor, value, sor->soc->regs->pll1); - pattern = DP_TRAINING_PATTERN_DISABLE; + /* use alternate scrambler reset for eDP */ + value = tegra_sor_readl(sor, SOR_DP_SPARE0); - err = drm_dp_aux_train(sor->aux, link, pattern); - if (err < 0) + if (link->edp == 0) + value &= ~SOR_DP_SPARE_PANEL_INTERNAL; + else + value |= SOR_DP_SPARE_PANEL_INTERNAL; + + tegra_sor_writel(sor, value, SOR_DP_SPARE0); + + err = tegra_sor_power_down_lanes(sor); + if (err < 0) { + dev_err(sor->dev, "failed to power down lanes: %d\n", err); + return err; + } + + /* power up and pre-charge lanes */ + err = tegra_sor_power_up_lanes(sor, lanes); + if (err < 0) { + dev_err(sor->dev, "failed to power up %u lane%s: %d\n", + lanes, (lanes != 1) ? "s" : "", err); return err; + } + + tegra_sor_dp_precharge(sor, lanes); return 0; } +static const struct drm_dp_link_ops tegra_sor_dp_link_ops = { + .apply_training = tegra_sor_dp_link_apply_training, + .configure = tegra_sor_dp_link_configure, +}; + static void tegra_sor_super_update(struct tegra_sor *sor) { tegra_sor_writel(sor, 0, SOR_SUPER_STATE0); @@ -912,11 +1160,11 @@ static int tegra_sor_compute_config(struct tegra_sor *sor, u32 num_syms_per_line; unsigned int i; - if (!link_rate || !link->num_lanes || !pclk || !config->bits_per_pixel) + if (!link_rate || !link->lanes || !pclk || !config->bits_per_pixel) return -EINVAL; - output = link_rate * 8 * link->num_lanes; input = pclk * config->bits_per_pixel; + output = link_rate * 8 * link->lanes; if (input >= output) return -ERANGE; @@ -959,7 +1207,7 @@ static int tegra_sor_compute_config(struct tegra_sor *sor, watermark = div_u64(watermark + params.error, f); config->watermark = watermark + (config->bits_per_pixel / 8) + 2; num_syms_per_line = (mode->hdisplay * config->bits_per_pixel) * - (link->num_lanes * 8); + (link->lanes * 8); if (config->watermark > 30) { config->watermark = 30; @@ -976,15 +1224,15 @@ static int tegra_sor_compute_config(struct tegra_sor *sor, num = ((mode->htotal - mode->hdisplay) - 7) * link_rate; config->hblank_symbols = div_u64(num, pclk); - if (link->capabilities & DP_LINK_CAP_ENHANCED_FRAMING) + if (link->caps.enhanced_framing) config->hblank_symbols -= 3; - config->hblank_symbols -= 12 / link->num_lanes; + config->hblank_symbols -= 12 / link->lanes; /* compute the number of symbols per vertical blanking interval */ num = (mode->hdisplay - 25) * link_rate; config->vblank_symbols = div_u64(num, pclk); - config->vblank_symbols -= 36 / link->num_lanes + 4; + config->vblank_symbols -= 36 / link->lanes + 4; dev_dbg(sor->dev, "blank symbols: H:%u V:%u\n", config->hblank_symbols, config->vblank_symbols); @@ -1200,29 +1448,6 @@ static int tegra_sor_power_down(struct tegra_sor *sor) return err; } - value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_0 | - SOR_DP_PADCTL_PD_TXD_1 | SOR_DP_PADCTL_PD_TXD_2); - tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - - /* stop lane sequencer */ - value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_UP | - SOR_LANE_SEQ_CTL_POWER_STATE_DOWN; - tegra_sor_writel(sor, 
value, SOR_LANE_SEQ_CTL); - - timeout = jiffies + msecs_to_jiffies(250); - - while (time_before(jiffies, timeout)) { - value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) - break; - - usleep_range(25, 100); - } - - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) != 0) - return -ETIMEDOUT; - value = tegra_sor_readl(sor, sor->soc->regs->pll2); value |= SOR_PLL2_PORT_POWERDOWN; tegra_sor_writel(sor, value, sor->soc->regs->pll2); @@ -1584,403 +1809,6 @@ static const struct drm_encoder_funcs tegra_sor_encoder_funcs = { .destroy = tegra_output_encoder_destroy, }; -static void tegra_sor_edp_disable(struct drm_encoder *encoder) -{ - struct tegra_output *output = encoder_to_output(encoder); - struct tegra_dc *dc = to_tegra_dc(encoder->crtc); - struct tegra_sor *sor = to_sor(output); - u32 value; - int err; - - if (output->panel) - drm_panel_disable(output->panel); - - err = tegra_sor_detach(sor); - if (err < 0) - dev_err(sor->dev, "failed to detach SOR: %d\n", err); - - tegra_sor_writel(sor, 0, SOR_STATE1); - tegra_sor_update(sor); - - /* - * The following accesses registers of the display controller, so make - * sure it's only executed when the output is attached to one. - */ - if (dc) { - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); - value &= ~SOR_ENABLE(0); - tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - - tegra_dc_commit(dc); - } - - err = tegra_sor_power_down(sor); - if (err < 0) - dev_err(sor->dev, "failed to power down SOR: %d\n", err); - - if (sor->aux) { - err = drm_dp_aux_disable(sor->aux); - if (err < 0) - dev_err(sor->dev, "failed to disable DP: %d\n", err); - } - - err = tegra_io_pad_power_disable(sor->pad); - if (err < 0) - dev_err(sor->dev, "failed to power off I/O pad: %d\n", err); - - if (output->panel) - drm_panel_unprepare(output->panel); - - pm_runtime_put(sor->dev); -} - -#if 0 -static int calc_h_ref_to_sync(const struct drm_display_mode *mode, - unsigned int *value) -{ - unsigned int hfp, hsw, hbp, a = 0, b; - - hfp = mode->hsync_start - mode->hdisplay; - hsw = mode->hsync_end - mode->hsync_start; - hbp = mode->htotal - mode->hsync_end; - - pr_info("hfp: %u, hsw: %u, hbp: %u\n", hfp, hsw, hbp); - - b = hfp - 1; - - pr_info("a: %u, b: %u\n", a, b); - pr_info("a + hsw + hbp = %u\n", a + hsw + hbp); - - if (a + hsw + hbp <= 11) { - a = 1 + 11 - hsw - hbp; - pr_info("a: %u\n", a); - } - - if (a > b) - return -EINVAL; - - if (hsw < 1) - return -EINVAL; - - if (mode->hdisplay < 16) - return -EINVAL; - - if (value) { - if (b > a && a % 2) - *value = a + 1; - else - *value = a; - } - - return 0; -} -#endif - -static void tegra_sor_edp_enable(struct drm_encoder *encoder) -{ - struct drm_display_mode *mode = &encoder->crtc->state->adjusted_mode; - struct tegra_output *output = encoder_to_output(encoder); - struct tegra_dc *dc = to_tegra_dc(encoder->crtc); - struct tegra_sor *sor = to_sor(output); - struct tegra_sor_config config; - struct tegra_sor_state *state; - struct drm_dp_link link; - u8 rate, lanes; - unsigned int i; - int err = 0; - u32 value; - - state = to_sor_state(output->connector.state); - - pm_runtime_get_sync(sor->dev); - - if (output->panel) - drm_panel_prepare(output->panel); - - err = drm_dp_aux_enable(sor->aux); - if (err < 0) - dev_err(sor->dev, "failed to enable DP: %d\n", err); - - err = drm_dp_link_probe(sor->aux, &link); - if (err < 0) { - dev_err(sor->dev, "failed to probe eDP link: %d\n", err); - return; - } - - /* switch to safe parent clock */ - err = tegra_sor_set_parent_clock(sor, sor->clk_safe); - if 
(err < 0) - dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); - - memset(&config, 0, sizeof(config)); - config.bits_per_pixel = state->bpc * 3; - - err = tegra_sor_compute_config(sor, mode, &config, &link); - if (err < 0) - dev_err(sor->dev, "failed to compute configuration: %d\n", err); - - value = tegra_sor_readl(sor, SOR_CLK_CNTRL); - value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK; - value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK; - tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_BANDGAP_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - usleep_range(20, 100); - - value = tegra_sor_readl(sor, sor->soc->regs->pll3); - value |= SOR_PLL3_PLL_VDD_MODE_3V3; - tegra_sor_writel(sor, value, sor->soc->regs->pll3); - - value = SOR_PLL0_ICHPMP(0xf) | SOR_PLL0_VCOCAP_RST | - SOR_PLL0_PLLREG_LEVEL_V45 | SOR_PLL0_RESISTOR_EXT; - tegra_sor_writel(sor, value, sor->soc->regs->pll0); - - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value |= SOR_PLL2_SEQ_PLLCAPPD; - value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; - value |= SOR_PLL2_LVDS_ENABLE; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - value = SOR_PLL1_TERM_COMPOUT | SOR_PLL1_TMDS_TERM; - tegra_sor_writel(sor, value, sor->soc->regs->pll1); - - while (true) { - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - if ((value & SOR_PLL2_SEQ_PLLCAPPD_ENFORCE) == 0) - break; - - usleep_range(250, 1000); - } - - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_POWERDOWN_OVERRIDE; - value &= ~SOR_PLL2_PORT_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - /* - * power up - */ - - /* set safe link bandwidth (1.62 Gbps) */ - value = tegra_sor_readl(sor, SOR_CLK_CNTRL); - value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; - value |= SOR_CLK_CNTRL_DP_LINK_SPEED_G1_62; - tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - - /* step 1 */ - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value |= SOR_PLL2_SEQ_PLLCAPPD_ENFORCE | SOR_PLL2_PORT_POWERDOWN | - SOR_PLL2_BANDGAP_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - value = tegra_sor_readl(sor, sor->soc->regs->pll0); - value |= SOR_PLL0_VCOPD | SOR_PLL0_PWR; - tegra_sor_writel(sor, value, sor->soc->regs->pll0); - - value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value &= ~SOR_DP_PADCTL_PAD_CAL_PD; - tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - - /* step 2 */ - err = tegra_io_pad_power_enable(sor->pad); - if (err < 0) - dev_err(sor->dev, "failed to power on I/O pad: %d\n", err); - - usleep_range(5, 100); - - /* step 3 */ - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_BANDGAP_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - usleep_range(20, 100); - - /* step 4 */ - value = tegra_sor_readl(sor, sor->soc->regs->pll0); - value &= ~SOR_PLL0_VCOPD; - value &= ~SOR_PLL0_PWR; - tegra_sor_writel(sor, value, sor->soc->regs->pll0); - - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - usleep_range(200, 1000); - - /* step 5 */ - value = tegra_sor_readl(sor, sor->soc->regs->pll2); - value &= ~SOR_PLL2_PORT_POWERDOWN; - tegra_sor_writel(sor, value, sor->soc->regs->pll2); - - /* XXX not in TRM */ - for (value = 0, i = 0; i < 5; i++) - value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->xbar_cfg[i]) | - SOR_XBAR_CTRL_LINK1_XSEL(i, i); - - tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); - 
tegra_sor_writel(sor, value, SOR_XBAR_CTRL); - - /* switch to DP parent clock */ - err = tegra_sor_set_parent_clock(sor, sor->clk_dp); - if (err < 0) - dev_err(sor->dev, "failed to set parent clock: %d\n", err); - - /* power DP lanes */ - value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - - if (link.num_lanes <= 2) - value &= ~(SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2); - else - value |= SOR_DP_PADCTL_PD_TXD_3 | SOR_DP_PADCTL_PD_TXD_2; - - if (link.num_lanes <= 1) - value &= ~SOR_DP_PADCTL_PD_TXD_1; - else - value |= SOR_DP_PADCTL_PD_TXD_1; - - if (link.num_lanes == 0) - value &= ~SOR_DP_PADCTL_PD_TXD_0; - else - value |= SOR_DP_PADCTL_PD_TXD_0; - - tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - - value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); - value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; - value |= SOR_DP_LINKCTL_LANE_COUNT(link.num_lanes); - tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); - - /* start lane sequencer */ - value = SOR_LANE_SEQ_CTL_TRIGGER | SOR_LANE_SEQ_CTL_SEQUENCE_DOWN | - SOR_LANE_SEQ_CTL_POWER_STATE_UP; - tegra_sor_writel(sor, value, SOR_LANE_SEQ_CTL); - - while (true) { - value = tegra_sor_readl(sor, SOR_LANE_SEQ_CTL); - if ((value & SOR_LANE_SEQ_CTL_TRIGGER) == 0) - break; - - usleep_range(250, 1000); - } - - /* set link bandwidth */ - value = tegra_sor_readl(sor, SOR_CLK_CNTRL); - value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; - value |= drm_dp_link_rate_to_bw_code(link.rate) << 2; - tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - - tegra_sor_apply_config(sor, &config); - - /* enable link */ - value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); - value |= SOR_DP_LINKCTL_ENABLE; - value |= SOR_DP_LINKCTL_ENHANCED_FRAME; - tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); - - for (i = 0, value = 0; i < 4; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_GALIOS | - SOR_DP_TPG_PATTERN_NONE; - value = (value << 8) | lane; - } - - tegra_sor_writel(sor, value, SOR_DP_TPG); - - /* enable pad calibration logic */ - value = tegra_sor_readl(sor, sor->soc->regs->dp_padctl0); - value |= SOR_DP_PADCTL_PAD_CAL_PD; - tegra_sor_writel(sor, value, sor->soc->regs->dp_padctl0); - - err = drm_dp_link_probe(sor->aux, &link); - if (err < 0) - dev_err(sor->dev, "failed to probe eDP link: %d\n", err); - - err = drm_dp_link_power_up(sor->aux, &link); - if (err < 0) - dev_err(sor->dev, "failed to power up eDP link: %d\n", err); - - err = drm_dp_link_configure(sor->aux, &link); - if (err < 0) - dev_err(sor->dev, "failed to configure eDP link: %d\n", err); - - rate = drm_dp_link_rate_to_bw_code(link.rate); - lanes = link.num_lanes; - - value = tegra_sor_readl(sor, SOR_CLK_CNTRL); - value &= ~SOR_CLK_CNTRL_DP_LINK_SPEED_MASK; - value |= SOR_CLK_CNTRL_DP_LINK_SPEED(rate); - tegra_sor_writel(sor, value, SOR_CLK_CNTRL); - - value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); - value &= ~SOR_DP_LINKCTL_LANE_COUNT_MASK; - value |= SOR_DP_LINKCTL_LANE_COUNT(lanes); - - if (link.capabilities & DP_LINK_CAP_ENHANCED_FRAMING) - value |= SOR_DP_LINKCTL_ENHANCED_FRAME; - - tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); - - /* disable training pattern generator */ - - for (i = 0; i < link.num_lanes; i++) { - unsigned long lane = SOR_DP_TPG_CHANNEL_CODING | - SOR_DP_TPG_SCRAMBLER_GALIOS | - SOR_DP_TPG_PATTERN_NONE; - value = (value << 8) | lane; - } - - tegra_sor_writel(sor, value, SOR_DP_TPG); - - err = tegra_sor_dp_train_fast(sor, &link); - if (err < 0) - dev_err(sor->dev, "DP fast link training failed: %d\n", err); - - dev_dbg(sor->dev, "fast link training 
succeeded\n"); - - err = tegra_sor_power_up(sor, 250); - if (err < 0) - dev_err(sor->dev, "failed to power up SOR: %d\n", err); - - /* CSTM (LVDS, link A/B, upper) */ - value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B | - SOR_CSTM_UPPER; - tegra_sor_writel(sor, value, SOR_CSTM); - - /* use DP-A protocol */ - value = tegra_sor_readl(sor, SOR_STATE1); - value &= ~SOR_STATE_ASY_PROTOCOL_MASK; - value |= SOR_STATE_ASY_PROTOCOL_DP_A; - tegra_sor_writel(sor, value, SOR_STATE1); - - tegra_sor_mode_set(sor, mode, state); - - /* PWM setup */ - err = tegra_sor_setup_pwm(sor, 250); - if (err < 0) - dev_err(sor->dev, "failed to setup PWM: %d\n", err); - - tegra_sor_update(sor); - - value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); - value |= SOR_ENABLE(0); - tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); - - tegra_dc_commit(dc); - - err = tegra_sor_attach(sor); - if (err < 0) - dev_err(sor->dev, "failed to attach SOR: %d\n", err); - - err = tegra_sor_wakeup(sor); - if (err < 0) - dev_err(sor->dev, "failed to enable DC: %d\n", err); - - if (output->panel) - drm_panel_enable(output->panel); -} - static int tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, struct drm_crtc_state *crtc_state, @@ -2030,12 +1858,6 @@ tegra_sor_encoder_atomic_check(struct drm_encoder *encoder, return 0; } -static const struct drm_encoder_helper_funcs tegra_sor_edp_helpers = { - .disable = tegra_sor_edp_disable, - .enable = tegra_sor_edp_enable, - .atomic_check = tegra_sor_encoder_atomic_check, -}; - static inline u32 tegra_sor_hdmi_subpack(const u8 *ptr, size_t size) { u32 value = 0; @@ -2160,6 +1982,15 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor) { u32 value; + /* + * Enable and unmask the HDA codec SCRATCH0 register interrupt. This + * is used for interoperability between the HDA codec driver and the + * HDMI/DP driver. 
+ */ + value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0; + tegra_sor_writel(sor, value, SOR_INT_ENABLE); + tegra_sor_writel(sor, value, SOR_INT_MASK); + tegra_sor_write_eld(sor); value = SOR_AUDIO_HDA_PRESENSE_ELDV | SOR_AUDIO_HDA_PRESENSE_PD; @@ -2169,6 +2000,32 @@ static void tegra_sor_audio_prepare(struct tegra_sor *sor) static void tegra_sor_audio_unprepare(struct tegra_sor *sor) { tegra_sor_writel(sor, 0, SOR_AUDIO_HDA_PRESENSE); + tegra_sor_writel(sor, 0, SOR_INT_MASK); + tegra_sor_writel(sor, 0, SOR_INT_ENABLE); +} + +static void tegra_sor_audio_enable(struct tegra_sor *sor) +{ + u32 value; + + value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL); + + /* select HDA audio input */ + value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK); + value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA); + + /* inject null samples */ + if (sor->format.channels != 2) + value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL; + else + value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL; + + value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH; + + tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL); + + /* enable advertising HBR capability */ + tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE); } static int tegra_sor_hdmi_enable_audio_infoframe(struct tegra_sor *sor) @@ -2206,24 +2063,7 @@ static void tegra_sor_hdmi_audio_enable(struct tegra_sor *sor) { u32 value; - value = tegra_sor_readl(sor, SOR_AUDIO_CNTRL); - - /* select HDA audio input */ - value &= ~SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_MASK); - value |= SOR_AUDIO_CNTRL_SOURCE_SELECT(SOURCE_SELECT_HDA); - - /* inject null samples */ - if (sor->format.channels != 2) - value &= ~SOR_AUDIO_CNTRL_INJECT_NULLSMPL; - else - value |= SOR_AUDIO_CNTRL_INJECT_NULLSMPL; - - value |= SOR_AUDIO_CNTRL_AFIFO_FLUSH; - - tegra_sor_writel(sor, value, SOR_AUDIO_CNTRL); - - /* enable advertising HBR capability */ - tegra_sor_writel(sor, SOR_AUDIO_SPARE_HBR_ENABLE, SOR_AUDIO_SPARE); + tegra_sor_audio_enable(sor); tegra_sor_writel(sor, 0, SOR_HDMI_ACR_CTRL); @@ -2399,9 +2239,9 @@ static void tegra_sor_hdmi_disable(struct drm_encoder *encoder) value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); if (!sor->soc->has_nvdisplay) - value &= ~(SOR1_TIMING_CYA | SOR_ENABLE(1)); - else - value &= ~SOR_ENABLE(sor->index); + value &= ~SOR1_TIMING_CYA; + + value &= ~SOR_ENABLE(sor->index); tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); @@ -2559,16 +2399,34 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); tegra_sor_writel(sor, value, SOR_XBAR_CTRL); - /* switch to parent clock */ - err = clk_set_parent(sor->clk, sor->clk_parent); + /* + * Switch the pad clock to the DP clock. Note that we cannot actually + * do this because Tegra186 and later don't support clk_set_parent() + * on the sorX_pad_clkout clocks. We already do the equivalent above + * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register. 
+ */ +#if 0 + err = clk_set_parent(sor->clk_pad, sor->clk_dp); if (err < 0) { - dev_err(sor->dev, "failed to set parent clock: %d\n", err); + dev_err(sor->dev, "failed to select pad parent clock: %d\n", + err); return; } +#endif + /* switch the SOR clock to the pad clock */ err = tegra_sor_set_parent_clock(sor, sor->clk_pad); if (err < 0) { - dev_err(sor->dev, "failed to set pad clock: %d\n", err); + dev_err(sor->dev, "failed to select SOR parent clock: %d\n", + err); + return; + } + + /* switch the output clock to the parent pixel clock */ + err = clk_set_parent(sor->clk, sor->clk_parent); + if (err < 0) { + dev_err(sor->dev, "failed to select output parent clock: %d\n", + err); return; } @@ -2774,9 +2632,9 @@ static void tegra_sor_hdmi_enable(struct drm_encoder *encoder) value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); if (!sor->soc->has_nvdisplay) - value |= SOR_ENABLE(1) | SOR1_TIMING_CYA; - else - value |= SOR_ENABLE(sor->index); + value |= SOR1_TIMING_CYA; + + value |= SOR_ENABLE(sor->index); tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); @@ -2803,6 +2661,396 @@ static const struct drm_encoder_helper_funcs tegra_sor_hdmi_helpers = { .atomic_check = tegra_sor_encoder_atomic_check, }; +static void tegra_sor_dp_disable(struct drm_encoder *encoder) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_sor *sor = to_sor(output); + u32 value; + int err; + + if (output->panel) + drm_panel_disable(output->panel); + + /* + * Do not attempt to power down a DP link if we're not connected since + * the AUX transactions would just be timing out. + */ + if (output->connector.status != connector_status_disconnected) { + err = drm_dp_link_power_down(sor->aux, &sor->link); + if (err < 0) + dev_err(sor->dev, "failed to power down link: %d\n", + err); + } + + err = tegra_sor_detach(sor); + if (err < 0) + dev_err(sor->dev, "failed to detach SOR: %d\n", err); + + tegra_sor_writel(sor, 0, SOR_STATE1); + tegra_sor_update(sor); + + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value &= ~SOR_ENABLE(sor->index); + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + tegra_dc_commit(dc); + + value = tegra_sor_readl(sor, SOR_STATE1); + value &= ~SOR_STATE_ASY_PROTOCOL_MASK; + value &= ~SOR_STATE_ASY_SUBOWNER_MASK; + value &= ~SOR_STATE_ASY_OWNER_MASK; + tegra_sor_writel(sor, value, SOR_STATE1); + tegra_sor_update(sor); + + /* switch to safe parent clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_safe); + if (err < 0) + dev_err(sor->dev, "failed to set safe clock: %d\n", err); + + err = tegra_sor_power_down(sor); + if (err < 0) + dev_err(sor->dev, "failed to power down SOR: %d\n", err); + + err = tegra_io_pad_power_disable(sor->pad); + if (err < 0) + dev_err(sor->dev, "failed to power off I/O pad: %d\n", err); + + err = drm_dp_aux_disable(sor->aux); + if (err < 0) + dev_err(sor->dev, "failed disable DPAUX: %d\n", err); + + if (output->panel) + drm_panel_unprepare(output->panel); + + pm_runtime_put(sor->dev); +} + +static void tegra_sor_dp_enable(struct drm_encoder *encoder) +{ + struct tegra_output *output = encoder_to_output(encoder); + struct tegra_dc *dc = to_tegra_dc(encoder->crtc); + struct tegra_sor *sor = to_sor(output); + struct tegra_sor_config config; + struct tegra_sor_state *state; + struct drm_display_mode *mode; + struct drm_display_info *info; + unsigned int i; + u32 value; + int err; + + state = to_sor_state(output->connector.state); + mode = &encoder->crtc->state->adjusted_mode; + info = 
&output->connector.display_info; + + pm_runtime_get_sync(sor->dev); + + /* switch to safe parent clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_safe); + if (err < 0) + dev_err(sor->dev, "failed to set safe parent clock: %d\n", err); + + err = tegra_io_pad_power_enable(sor->pad); + if (err < 0) + dev_err(sor->dev, "failed to power on LVDS rail: %d\n", err); + + usleep_range(20, 100); + + err = drm_dp_aux_enable(sor->aux); + if (err < 0) + dev_err(sor->dev, "failed to enable DPAUX: %d\n", err); + + err = drm_dp_link_probe(sor->aux, &sor->link); + if (err < 0) + dev_err(sor->dev, "failed to probe DP link: %d\n", err); + + tegra_sor_filter_rates(sor); + + err = drm_dp_link_choose(&sor->link, mode, info); + if (err < 0) + dev_err(sor->dev, "failed to choose link: %d\n", err); + + if (output->panel) + drm_panel_prepare(output->panel); + + value = tegra_sor_readl(sor, sor->soc->regs->pll2); + value &= ~SOR_PLL2_BANDGAP_POWERDOWN; + tegra_sor_writel(sor, value, sor->soc->regs->pll2); + + usleep_range(20, 40); + + value = tegra_sor_readl(sor, sor->soc->regs->pll3); + value |= SOR_PLL3_PLL_VDD_MODE_3V3; + tegra_sor_writel(sor, value, sor->soc->regs->pll3); + + value = tegra_sor_readl(sor, sor->soc->regs->pll0); + value &= ~(SOR_PLL0_VCOPD | SOR_PLL0_PWR); + tegra_sor_writel(sor, value, sor->soc->regs->pll0); + + value = tegra_sor_readl(sor, sor->soc->regs->pll2); + value &= ~SOR_PLL2_SEQ_PLLCAPPD_ENFORCE; + value |= SOR_PLL2_SEQ_PLLCAPPD; + tegra_sor_writel(sor, value, sor->soc->regs->pll2); + + usleep_range(200, 400); + + value = tegra_sor_readl(sor, sor->soc->regs->pll2); + value &= ~SOR_PLL2_POWERDOWN_OVERRIDE; + value &= ~SOR_PLL2_PORT_POWERDOWN; + tegra_sor_writel(sor, value, sor->soc->regs->pll2); + + value = tegra_sor_readl(sor, SOR_CLK_CNTRL); + value &= ~SOR_CLK_CNTRL_DP_CLK_SEL_MASK; + + if (output->panel) + value |= SOR_CLK_CNTRL_DP_CLK_SEL_SINGLE_DPCLK; + else + value |= SOR_CLK_CNTRL_DP_CLK_SEL_DIFF_DPCLK; + + tegra_sor_writel(sor, value, SOR_CLK_CNTRL); + + usleep_range(200, 400); + + value = tegra_sor_readl(sor, SOR_DP_SPARE0); + /* XXX not in TRM */ + if (output->panel) + value |= SOR_DP_SPARE_PANEL_INTERNAL; + else + value &= ~SOR_DP_SPARE_PANEL_INTERNAL; + + value |= SOR_DP_SPARE_SEQ_ENABLE; + tegra_sor_writel(sor, value, SOR_DP_SPARE0); + + /* XXX not in TRM */ + tegra_sor_writel(sor, 0, SOR_LVDS); + + value = tegra_sor_readl(sor, sor->soc->regs->pll0); + value &= ~SOR_PLL0_ICHPMP_MASK; + value &= ~SOR_PLL0_VCOCAP_MASK; + value |= SOR_PLL0_ICHPMP(0x1); + value |= SOR_PLL0_VCOCAP(0x3); + value |= SOR_PLL0_RESISTOR_EXT; + tegra_sor_writel(sor, value, sor->soc->regs->pll0); + + /* XXX not in TRM */ + for (value = 0, i = 0; i < 5; i++) + value |= SOR_XBAR_CTRL_LINK0_XSEL(i, sor->soc->xbar_cfg[i]) | + SOR_XBAR_CTRL_LINK1_XSEL(i, i); + + tegra_sor_writel(sor, 0x00000000, SOR_XBAR_POL); + tegra_sor_writel(sor, value, SOR_XBAR_CTRL); + + /* + * Switch the pad clock to the DP clock. Note that we cannot actually + * do this because Tegra186 and later don't support clk_set_parent() + * on the sorX_pad_clkout clocks. We already do the equivalent above + * using the DP_CLK_SEL mux of the SOR_CLK_CNTRL register. 
+ */ +#if 0 + err = clk_set_parent(sor->clk_pad, sor->clk_parent); + if (err < 0) { + dev_err(sor->dev, "failed to select pad parent clock: %d\n", + err); + return; + } +#endif + + /* switch the SOR clock to the pad clock */ + err = tegra_sor_set_parent_clock(sor, sor->clk_pad); + if (err < 0) { + dev_err(sor->dev, "failed to select SOR parent clock: %d\n", + err); + return; + } + + /* switch the output clock to the parent pixel clock */ + err = clk_set_parent(sor->clk, sor->clk_parent); + if (err < 0) { + dev_err(sor->dev, "failed to select output parent clock: %d\n", + err); + return; + } + + /* use DP-A protocol */ + value = tegra_sor_readl(sor, SOR_STATE1); + value &= ~SOR_STATE_ASY_PROTOCOL_MASK; + value |= SOR_STATE_ASY_PROTOCOL_DP_A; + tegra_sor_writel(sor, value, SOR_STATE1); + + /* enable port */ + value = tegra_sor_readl(sor, SOR_DP_LINKCTL0); + value |= SOR_DP_LINKCTL_ENABLE; + tegra_sor_writel(sor, value, SOR_DP_LINKCTL0); + + tegra_sor_dp_term_calibrate(sor); + + err = drm_dp_link_train(&sor->link); + if (err < 0) + dev_err(sor->dev, "link training failed: %d\n", err); + else + dev_dbg(sor->dev, "link training succeeded\n"); + + err = drm_dp_link_power_up(sor->aux, &sor->link); + if (err < 0) + dev_err(sor->dev, "failed to power up DP link: %d\n", err); + + /* compute configuration */ + memset(&config, 0, sizeof(config)); + config.bits_per_pixel = state->bpc * 3; + + err = tegra_sor_compute_config(sor, mode, &config, &sor->link); + if (err < 0) + dev_err(sor->dev, "failed to compute configuration: %d\n", err); + + tegra_sor_apply_config(sor, &config); + tegra_sor_mode_set(sor, mode, state); + + if (output->panel) { + /* CSTM (LVDS, link A/B, upper) */ + value = SOR_CSTM_LVDS | SOR_CSTM_LINK_ACT_A | SOR_CSTM_LINK_ACT_B | + SOR_CSTM_UPPER; + tegra_sor_writel(sor, value, SOR_CSTM); + + /* PWM setup */ + err = tegra_sor_setup_pwm(sor, 250); + if (err < 0) + dev_err(sor->dev, "failed to setup PWM: %d\n", err); + } + + tegra_sor_update(sor); + + err = tegra_sor_power_up(sor, 250); + if (err < 0) + dev_err(sor->dev, "failed to power up SOR: %d\n", err); + + /* attach and wake up */ + err = tegra_sor_attach(sor); + if (err < 0) + dev_err(sor->dev, "failed to attach SOR: %d\n", err); + + value = tegra_dc_readl(dc, DC_DISP_DISP_WIN_OPTIONS); + value |= SOR_ENABLE(sor->index); + tegra_dc_writel(dc, value, DC_DISP_DISP_WIN_OPTIONS); + + tegra_dc_commit(dc); + + err = tegra_sor_wakeup(sor); + if (err < 0) + dev_err(sor->dev, "failed to wakeup SOR: %d\n", err); + + if (output->panel) + drm_panel_enable(output->panel); +} + +static const struct drm_encoder_helper_funcs tegra_sor_dp_helpers = { + .disable = tegra_sor_dp_disable, + .enable = tegra_sor_dp_enable, + .atomic_check = tegra_sor_encoder_atomic_check, +}; + +static int tegra_sor_hdmi_probe(struct tegra_sor *sor) +{ + int err; + + sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io"); + if (IS_ERR(sor->avdd_io_supply)) { + dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n", + PTR_ERR(sor->avdd_io_supply)); + return PTR_ERR(sor->avdd_io_supply); + } + + err = regulator_enable(sor->avdd_io_supply); + if (err < 0) { + dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n", + err); + return err; + } + + sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll"); + if (IS_ERR(sor->vdd_pll_supply)) { + dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n", + PTR_ERR(sor->vdd_pll_supply)); + return PTR_ERR(sor->vdd_pll_supply); + } + + err = regulator_enable(sor->vdd_pll_supply); + if (err < 0) { + dev_err(sor->dev, "failed 
to enable VDD PLL supply: %d\n", + err); + return err; + } + + sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi"); + if (IS_ERR(sor->hdmi_supply)) { + dev_err(sor->dev, "cannot get HDMI supply: %ld\n", + PTR_ERR(sor->hdmi_supply)); + return PTR_ERR(sor->hdmi_supply); + } + + err = regulator_enable(sor->hdmi_supply); + if (err < 0) { + dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err); + return err; + } + + INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work); + + return 0; +} + +static int tegra_sor_hdmi_remove(struct tegra_sor *sor) +{ + regulator_disable(sor->hdmi_supply); + regulator_disable(sor->vdd_pll_supply); + regulator_disable(sor->avdd_io_supply); + + return 0; +} + +static const struct tegra_sor_ops tegra_sor_hdmi_ops = { + .name = "HDMI", + .probe = tegra_sor_hdmi_probe, + .remove = tegra_sor_hdmi_remove, + .audio_enable = tegra_sor_hdmi_audio_enable, + .audio_disable = tegra_sor_hdmi_audio_disable, +}; + +static int tegra_sor_dp_probe(struct tegra_sor *sor) +{ + int err; + + sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io-hdmi-dp"); + if (IS_ERR(sor->avdd_io_supply)) + return PTR_ERR(sor->avdd_io_supply); + + err = regulator_enable(sor->avdd_io_supply); + if (err < 0) + return err; + + sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-hdmi-dp-pll"); + if (IS_ERR(sor->vdd_pll_supply)) + return PTR_ERR(sor->vdd_pll_supply); + + err = regulator_enable(sor->vdd_pll_supply); + if (err < 0) + return err; + + return 0; +} + +static int tegra_sor_dp_remove(struct tegra_sor *sor) +{ + regulator_disable(sor->vdd_pll_supply); + regulator_disable(sor->avdd_io_supply); + + return 0; +} + +static const struct tegra_sor_ops tegra_sor_dp_ops = { + .name = "DP", + .probe = tegra_sor_dp_probe, + .remove = tegra_sor_dp_remove, +}; + static int tegra_sor_init(struct host1x_client *client) { struct drm_device *drm = dev_get_drvdata(client->parent); @@ -2810,11 +3058,10 @@ static int tegra_sor_init(struct host1x_client *client) struct tegra_sor *sor = host1x_client_to_sor(client); int connector = DRM_MODE_CONNECTOR_Unknown; int encoder = DRM_MODE_ENCODER_NONE; - u32 value; int err; if (!sor->aux) { - if (sor->soc->supports_hdmi) { + if (sor->ops == &tegra_sor_hdmi_ops) { connector = DRM_MODE_CONNECTOR_HDMIA; encoder = DRM_MODE_ENCODER_TMDS; helpers = &tegra_sor_hdmi_helpers; @@ -2823,14 +3070,18 @@ static int tegra_sor_init(struct host1x_client *client) encoder = DRM_MODE_ENCODER_LVDS; } } else { - if (sor->soc->supports_edp) { + if (sor->output.panel) { connector = DRM_MODE_CONNECTOR_eDP; encoder = DRM_MODE_ENCODER_TMDS; - helpers = &tegra_sor_edp_helpers; - } else if (sor->soc->supports_dp) { + helpers = &tegra_sor_dp_helpers; + } else { connector = DRM_MODE_CONNECTOR_DisplayPort; encoder = DRM_MODE_ENCODER_TMDS; + helpers = &tegra_sor_dp_helpers; } + + sor->link.ops = &tegra_sor_dp_link_ops; + sor->link.aux = sor->aux; } sor->output.dev = sor->dev; @@ -2913,15 +3164,6 @@ static int tegra_sor_init(struct host1x_client *client) if (err < 0) return err; - /* - * Enable and unmask the HDA codec SCRATCH0 register interrupt. This - * is used for interoperability between the HDA codec driver and the - * HDMI/DP driver. 
- */ - value = SOR_INT_CODEC_SCRATCH1 | SOR_INT_CODEC_SCRATCH0; - tegra_sor_writel(sor, value, SOR_INT_ENABLE); - tegra_sor_writel(sor, value, SOR_INT_MASK); - return 0; } @@ -2930,9 +3172,6 @@ static int tegra_sor_exit(struct host1x_client *client) struct tegra_sor *sor = host1x_client_to_sor(client); int err; - tegra_sor_writel(sor, 0, SOR_INT_MASK); - tegra_sor_writel(sor, 0, SOR_INT_ENABLE); - tegra_output_exit(&sor->output); if (sor->aux) { @@ -2955,75 +3194,6 @@ static const struct host1x_client_ops sor_client_ops = { .exit = tegra_sor_exit, }; -static const struct tegra_sor_ops tegra_sor_edp_ops = { - .name = "eDP", -}; - -static int tegra_sor_hdmi_probe(struct tegra_sor *sor) -{ - int err; - - sor->avdd_io_supply = devm_regulator_get(sor->dev, "avdd-io"); - if (IS_ERR(sor->avdd_io_supply)) { - dev_err(sor->dev, "cannot get AVDD I/O supply: %ld\n", - PTR_ERR(sor->avdd_io_supply)); - return PTR_ERR(sor->avdd_io_supply); - } - - err = regulator_enable(sor->avdd_io_supply); - if (err < 0) { - dev_err(sor->dev, "failed to enable AVDD I/O supply: %d\n", - err); - return err; - } - - sor->vdd_pll_supply = devm_regulator_get(sor->dev, "vdd-pll"); - if (IS_ERR(sor->vdd_pll_supply)) { - dev_err(sor->dev, "cannot get VDD PLL supply: %ld\n", - PTR_ERR(sor->vdd_pll_supply)); - return PTR_ERR(sor->vdd_pll_supply); - } - - err = regulator_enable(sor->vdd_pll_supply); - if (err < 0) { - dev_err(sor->dev, "failed to enable VDD PLL supply: %d\n", - err); - return err; - } - - sor->hdmi_supply = devm_regulator_get(sor->dev, "hdmi"); - if (IS_ERR(sor->hdmi_supply)) { - dev_err(sor->dev, "cannot get HDMI supply: %ld\n", - PTR_ERR(sor->hdmi_supply)); - return PTR_ERR(sor->hdmi_supply); - } - - err = regulator_enable(sor->hdmi_supply); - if (err < 0) { - dev_err(sor->dev, "failed to enable HDMI supply: %d\n", err); - return err; - } - - INIT_DELAYED_WORK(&sor->scdc, tegra_sor_hdmi_scdc_work); - - return 0; -} - -static int tegra_sor_hdmi_remove(struct tegra_sor *sor) -{ - regulator_disable(sor->hdmi_supply); - regulator_disable(sor->vdd_pll_supply); - regulator_disable(sor->avdd_io_supply); - - return 0; -} - -static const struct tegra_sor_ops tegra_sor_hdmi_ops = { - .name = "HDMI", - .probe = tegra_sor_hdmi_probe, - .remove = tegra_sor_hdmi_remove, -}; - static const u8 tegra124_sor_xbar_cfg[5] = { 0, 1, 2, 3, 4 }; @@ -3043,14 +3213,161 @@ static const struct tegra_sor_regs tegra124_sor_regs = { .dp_padctl2 = 0x73, }; +/* Tegra124 and Tegra132 have lanes 0 and 2 swapped. 
*/ +static const u8 tegra124_sor_lane_map[4] = { + 2, 1, 0, 3, +}; + +static const u8 tegra124_sor_voltage_swing[4][4][4] = { + { + { 0x13, 0x19, 0x1e, 0x28 }, + { 0x1e, 0x25, 0x2d, }, + { 0x28, 0x32, }, + { 0x3c, }, + }, { + { 0x12, 0x17, 0x1b, 0x25 }, + { 0x1c, 0x23, 0x2a, }, + { 0x25, 0x2f, }, + { 0x39, } + }, { + { 0x12, 0x16, 0x1a, 0x22 }, + { 0x1b, 0x20, 0x27, }, + { 0x24, 0x2d, }, + { 0x36, }, + }, { + { 0x11, 0x14, 0x17, 0x1f }, + { 0x19, 0x1e, 0x24, }, + { 0x22, 0x2a, }, + { 0x32, }, + }, +}; + +static const u8 tegra124_sor_pre_emphasis[4][4][4] = { + { + { 0x00, 0x09, 0x13, 0x25 }, + { 0x00, 0x0f, 0x1e, }, + { 0x00, 0x14, }, + { 0x00, }, + }, { + { 0x00, 0x0a, 0x14, 0x28 }, + { 0x00, 0x0f, 0x1e, }, + { 0x00, 0x14, }, + { 0x00 }, + }, { + { 0x00, 0x0a, 0x14, 0x28 }, + { 0x00, 0x0f, 0x1e, }, + { 0x00, 0x14, }, + { 0x00, }, + }, { + { 0x00, 0x0a, 0x14, 0x28 }, + { 0x00, 0x0f, 0x1e, }, + { 0x00, 0x14, }, + { 0x00, }, + }, +}; + +static const u8 tegra124_sor_post_cursor[4][4][4] = { + { + { 0x00, 0x00, 0x00, 0x00 }, + { 0x00, 0x00, 0x00, }, + { 0x00, 0x00, }, + { 0x00, }, + }, { + { 0x02, 0x02, 0x04, 0x05 }, + { 0x02, 0x04, 0x05, }, + { 0x04, 0x05, }, + { 0x05, }, + }, { + { 0x04, 0x05, 0x08, 0x0b }, + { 0x05, 0x09, 0x0b, }, + { 0x08, 0x0a, }, + { 0x0b, }, + }, { + { 0x05, 0x09, 0x0b, 0x12 }, + { 0x09, 0x0d, 0x12, }, + { 0x0b, 0x0f, }, + { 0x12, }, + }, +}; + +static const u8 tegra124_sor_tx_pu[4][4][4] = { + { + { 0x20, 0x30, 0x40, 0x60 }, + { 0x30, 0x40, 0x60, }, + { 0x40, 0x60, }, + { 0x60, }, + }, { + { 0x20, 0x20, 0x30, 0x50 }, + { 0x30, 0x40, 0x50, }, + { 0x40, 0x50, }, + { 0x60, }, + }, { + { 0x20, 0x20, 0x30, 0x40, }, + { 0x30, 0x30, 0x40, }, + { 0x40, 0x50, }, + { 0x60, }, + }, { + { 0x20, 0x20, 0x20, 0x40, }, + { 0x30, 0x30, 0x40, }, + { 0x40, 0x40, }, + { 0x60, }, + }, +}; + static const struct tegra_sor_soc tegra124_sor = { - .supports_edp = true, .supports_lvds = true, .supports_hdmi = false, - .supports_dp = false, + .supports_dp = true, + .supports_audio = false, + .supports_hdcp = false, .regs = &tegra124_sor_regs, .has_nvdisplay = false, .xbar_cfg = tegra124_sor_xbar_cfg, + .lane_map = tegra124_sor_lane_map, + .voltage_swing = tegra124_sor_voltage_swing, + .pre_emphasis = tegra124_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, +}; + +static const u8 tegra132_sor_pre_emphasis[4][4][4] = { + { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x01, 0x0e, 0x1d, }, + { 0x01, 0x13, }, + { 0x00, }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00 }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00, }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00, }, + }, +}; + +static const struct tegra_sor_soc tegra132_sor = { + .supports_lvds = true, + .supports_hdmi = false, + .supports_dp = true, + .supports_audio = false, + .supports_hdcp = false, + .regs = &tegra124_sor_regs, + .has_nvdisplay = false, + .xbar_cfg = tegra124_sor_xbar_cfg, + .lane_map = tegra124_sor_lane_map, + .voltage_swing = tegra124_sor_voltage_swing, + .pre_emphasis = tegra132_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct tegra_sor_regs tegra210_sor_regs = { @@ -3068,33 +3385,50 @@ static const struct tegra_sor_regs tegra210_sor_regs = { .dp_padctl2 = 0x73, }; +static const u8 tegra210_sor_xbar_cfg[5] = { + 2, 1, 0, 3, 4 +}; + +static const u8 tegra210_sor_lane_map[4] = { + 0, 1, 2, 3, +}; + 
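As an aside on the tables being added here: each SoC entry now carries a lane map plus 4x4x4 voltage-swing, pre-emphasis, post-cursor and TX_PU calibration tables that the DP code looks up per training level, and the lane map (for example Tegra124's { 2, 1, 0, 3 }) translates a logical DP lane to its physical pad lane. A minimal sketch of how such data could be consumed, assuming a hypothetical [swing][pre-emphasis][post-cursor] index order and made-up helper names -- not the Tegra driver's actual functions:

#include <linux/types.h>

/* hypothetical container mirroring the per-SoC fields added in this diff */
struct example_dp_calib {
	/* points at a u8[4][4][4] table such as tegra124_sor_voltage_swing */
	const u8 (*voltage_swing)[4][4];
	/* logical DP lane -> physical pad lane, e.g. { 2, 1, 0, 3 } */
	const u8 *lane_map;
};

static u8 example_swing_level(const struct example_dp_calib *calib,
			      unsigned int swing, unsigned int emphasis,
			      unsigned int cursor)
{
	/* assumed index order; the real driver may index these tables differently */
	return calib->voltage_swing[swing][emphasis][cursor];
}

static unsigned int example_pad_for_lane(const struct example_dp_calib *calib,
					 unsigned int lane)
{
	/* Tegra124/132 swap lanes 0 and 2, Tegra210 uses the identity map */
	return calib->lane_map[lane];
}
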
static const struct tegra_sor_soc tegra210_sor = { - .supports_edp = true, .supports_lvds = false, .supports_hdmi = false, - .supports_dp = false, + .supports_dp = true, + .supports_audio = false, + .supports_hdcp = false, + .regs = &tegra210_sor_regs, .has_nvdisplay = false, - .xbar_cfg = tegra124_sor_xbar_cfg, -}; -static const u8 tegra210_sor_xbar_cfg[5] = { - 2, 1, 0, 3, 4 + .xbar_cfg = tegra210_sor_xbar_cfg, + .lane_map = tegra210_sor_lane_map, + .voltage_swing = tegra124_sor_voltage_swing, + .pre_emphasis = tegra124_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct tegra_sor_soc tegra210_sor1 = { - .supports_edp = false, .supports_lvds = false, .supports_hdmi = true, .supports_dp = true, + .supports_audio = true, + .supports_hdcp = true, .regs = &tegra210_sor_regs, .has_nvdisplay = false, .num_settings = ARRAY_SIZE(tegra210_sor_hdmi_defaults), .settings = tegra210_sor_hdmi_defaults, - .xbar_cfg = tegra210_sor_xbar_cfg, + .lane_map = tegra210_sor_lane_map, + .voltage_swing = tegra124_sor_voltage_swing, + .pre_emphasis = tegra124_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct tegra_sor_regs tegra186_sor_regs = { @@ -3112,31 +3446,72 @@ static const struct tegra_sor_regs tegra186_sor_regs = { .dp_padctl2 = 0x16a, }; -static const struct tegra_sor_soc tegra186_sor = { - .supports_edp = false, - .supports_lvds = false, - .supports_hdmi = false, - .supports_dp = true, - - .regs = &tegra186_sor_regs, - .has_nvdisplay = true, +static const u8 tegra186_sor_voltage_swing[4][4][4] = { + { + { 0x13, 0x19, 0x1e, 0x28 }, + { 0x1e, 0x25, 0x2d, }, + { 0x28, 0x32, }, + { 0x39, }, + }, { + { 0x12, 0x16, 0x1b, 0x25 }, + { 0x1c, 0x23, 0x2a, }, + { 0x25, 0x2f, }, + { 0x37, } + }, { + { 0x12, 0x16, 0x1a, 0x22 }, + { 0x1b, 0x20, 0x27, }, + { 0x24, 0x2d, }, + { 0x35, }, + }, { + { 0x11, 0x14, 0x17, 0x1f }, + { 0x19, 0x1e, 0x24, }, + { 0x22, 0x2a, }, + { 0x32, }, + }, +}; - .xbar_cfg = tegra124_sor_xbar_cfg, +static const u8 tegra186_sor_pre_emphasis[4][4][4] = { + { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x01, 0x0e, 0x1d, }, + { 0x01, 0x13, }, + { 0x00, }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00 }, + }, { + { 0x00, 0x08, 0x14, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00, }, + }, { + { 0x00, 0x08, 0x12, 0x24 }, + { 0x00, 0x0e, 0x1d, }, + { 0x00, 0x13, }, + { 0x00, }, + }, }; -static const struct tegra_sor_soc tegra186_sor1 = { - .supports_edp = false, +static const struct tegra_sor_soc tegra186_sor = { .supports_lvds = false, .supports_hdmi = true, .supports_dp = true, + .supports_audio = true, + .supports_hdcp = true, .regs = &tegra186_sor_regs, .has_nvdisplay = true, .num_settings = ARRAY_SIZE(tegra186_sor_hdmi_defaults), .settings = tegra186_sor_hdmi_defaults, - .xbar_cfg = tegra124_sor_xbar_cfg, + .lane_map = tegra124_sor_lane_map, + .voltage_swing = tegra186_sor_voltage_swing, + .pre_emphasis = tegra186_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct tegra_sor_regs tegra194_sor_regs = { @@ -3155,10 +3530,11 @@ static const struct tegra_sor_regs tegra194_sor_regs = { }; static const struct tegra_sor_soc tegra194_sor = { - .supports_edp = true, .supports_lvds = false, .supports_hdmi = true, .supports_dp = true, + .supports_audio = true, + .supports_hdcp = true, .regs = &tegra194_sor_regs, .has_nvdisplay = true, @@ -3167,14 +3543,19 @@ static const 
struct tegra_sor_soc tegra194_sor = { .settings = tegra194_sor_hdmi_defaults, .xbar_cfg = tegra210_sor_xbar_cfg, + .lane_map = tegra124_sor_lane_map, + .voltage_swing = tegra186_sor_voltage_swing, + .pre_emphasis = tegra186_sor_pre_emphasis, + .post_cursor = tegra124_sor_post_cursor, + .tx_pu = tegra124_sor_tx_pu, }; static const struct of_device_id tegra_sor_of_match[] = { { .compatible = "nvidia,tegra194-sor", .data = &tegra194_sor }, - { .compatible = "nvidia,tegra186-sor1", .data = &tegra186_sor1 }, { .compatible = "nvidia,tegra186-sor", .data = &tegra186_sor }, { .compatible = "nvidia,tegra210-sor1", .data = &tegra210_sor1 }, { .compatible = "nvidia,tegra210-sor", .data = &tegra210_sor }, + { .compatible = "nvidia,tegra132-sor", .data = &tegra132_sor }, { .compatible = "nvidia,tegra124-sor", .data = &tegra124_sor }, { }, }; @@ -3200,6 +3581,11 @@ static int tegra_sor_parse_dt(struct tegra_sor *sor) * earlier */ sor->pad = TEGRA_IO_PAD_HDMI_DP0 + sor->index; + } else { + if (!sor->soc->supports_audio) + sor->index = 0; + else + sor->index = 1; } err = of_property_read_u32_array(np, "nvidia,xbar-cfg", xbar_cfg, 5); @@ -3234,9 +3620,11 @@ static irqreturn_t tegra_sor_irq(int irq, void *data) tegra_hda_parse_format(format, &sor->format); - tegra_sor_hdmi_audio_enable(sor); + if (sor->ops->audio_enable) + sor->ops->audio_enable(sor); } else { - tegra_sor_hdmi_audio_disable(sor); + if (sor->ops->audio_disable) + sor->ops->audio_disable(sor); } } @@ -3273,6 +3661,8 @@ static int tegra_sor_probe(struct platform_device *pdev) if (!sor->aux) return -EPROBE_DEFER; + + sor->output.ddc = &sor->aux->ddc; } if (!sor->aux) { @@ -3287,16 +3677,15 @@ static int tegra_sor_probe(struct platform_device *pdev) return -ENODEV; } } else { - if (sor->soc->supports_edp) { - sor->ops = &tegra_sor_edp_ops; - sor->pad = TEGRA_IO_PAD_LVDS; - } else if (sor->soc->supports_dp) { - dev_err(&pdev->dev, "DisplayPort not supported yet\n"); - return -ENODEV; - } else { - dev_err(&pdev->dev, "unknown (DP) support\n"); - return -ENODEV; - } + np = of_parse_phandle(pdev->dev.of_node, "nvidia,panel", 0); + /* + * No need to keep this around since we only use it as a check + * to see if a panel is connected (eDP) or not (DP). + */ + of_node_put(np); + + sor->ops = &tegra_sor_dp_ops; + sor->pad = TEGRA_IO_PAD_LVDS; } err = tegra_sor_parse_dt(sor); @@ -3451,6 +3840,8 @@ static int tegra_sor_probe(struct platform_device *pdev) * pad output clock. 
*/ if (!sor->clk_pad) { + char *name; + err = pm_runtime_get_sync(&pdev->dev); if (err < 0) { dev_err(&pdev->dev, "failed to get runtime PM: %d\n", @@ -3458,8 +3849,13 @@ static int tegra_sor_probe(struct platform_device *pdev) goto remove; } - sor->clk_pad = tegra_clk_sor_pad_register(sor, - "sor1_pad_clkout"); + name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "sor%u_pad_clkout", sor->index); + if (!name) { + err = -ENOMEM; + goto remove; + } + + sor->clk_pad = tegra_clk_sor_pad_register(sor, name); pm_runtime_put(&pdev->dev); } diff --git a/drivers/gpu/drm/tegra/sor.h b/drivers/gpu/drm/tegra/sor.h index f8efd8be4b7c..00e09d5dca30 100644 --- a/drivers/gpu/drm/tegra/sor.h +++ b/drivers/gpu/drm/tegra/sor.h @@ -39,6 +39,7 @@ #define SOR_STATE_ASY_CRC_MODE_NON_ACTIVE (0x2 << 6) #define SOR_STATE_ASY_CRC_MODE_COMPLETE (0x1 << 6) #define SOR_STATE_ASY_CRC_MODE_ACTIVE (0x0 << 6) +#define SOR_STATE_ASY_SUBOWNER_MASK (0x3 << 4) #define SOR_STATE_ASY_OWNER_MASK 0xf #define SOR_STATE_ASY_OWNER(x) (((x) & 0xf) << 0) @@ -283,10 +284,12 @@ #define SOR_DP_PADCTL_CM_TXD_2 (1 << 6) #define SOR_DP_PADCTL_CM_TXD_1 (1 << 5) #define SOR_DP_PADCTL_CM_TXD_0 (1 << 4) +#define SOR_DP_PADCTL_CM_TXD(x) (1 << (4 + (x))) #define SOR_DP_PADCTL_PD_TXD_3 (1 << 3) #define SOR_DP_PADCTL_PD_TXD_0 (1 << 2) #define SOR_DP_PADCTL_PD_TXD_1 (1 << 1) #define SOR_DP_PADCTL_PD_TXD_2 (1 << 0) +#define SOR_DP_PADCTL_PD_TXD(x) (1 << (0 + (x))) #define SOR_DP_PADCTL1 0x5d diff --git a/drivers/gpu/drm/tegra/vic.c b/drivers/gpu/drm/tegra/vic.c index cd0399fd8c63..9444ba183990 100644 --- a/drivers/gpu/drm/tegra/vic.c +++ b/drivers/gpu/drm/tegra/vic.c @@ -34,7 +34,6 @@ struct vic { void __iomem *regs; struct tegra_drm_client client; struct host1x_channel *channel; - struct iommu_domain *domain; struct device *dev; struct clk *clk; struct reset_control *rst; @@ -97,6 +96,9 @@ static int vic_runtime_suspend(struct device *dev) static int vic_boot(struct vic *vic) { +#ifdef CONFIG_IOMMU_API + struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev); +#endif u32 fce_ucode_size, fce_bin_data_offset; void *hdr; int err = 0; @@ -105,15 +107,14 @@ static int vic_boot(struct vic *vic) return 0; #ifdef CONFIG_IOMMU_API - if (vic->config->supports_sid) { - struct iommu_fwspec *spec = dev_iommu_fwspec_get(vic->dev); + if (vic->config->supports_sid && spec) { u32 value; value = TRANSCFG_ATT(1, TRANSCFG_SID_FALCON) | TRANSCFG_ATT(0, TRANSCFG_SID_HW); vic_writel(vic, value, VIC_TFBIF_TRANSCFG); - if (spec && spec->num_ids > 0) { + if (spec->num_ids > 0) { value = spec->ids[0] & 0xffff; vic_writel(vic, value, VIC_THI_STREAMID0); @@ -132,9 +133,9 @@ static int vic_boot(struct vic *vic) if (err < 0) return err; - hdr = vic->falcon.firmware.vaddr; + hdr = vic->falcon.firmware.virt; fce_bin_data_offset = *(u32 *)(hdr + VIC_UCODE_FCE_DATA_OFFSET); - hdr = vic->falcon.firmware.vaddr + + hdr = vic->falcon.firmware.virt + *(u32 *)(hdr + VIC_UCODE_FCE_HEADER_OFFSET); fce_ucode_size = *(u32 *)(hdr + FCE_UCODE_SIZE_OFFSET); @@ -142,7 +143,7 @@ static int vic_boot(struct vic *vic) falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_SIZE, fce_ucode_size); falcon_execute_method(&vic->falcon, VIC_SET_FCE_UCODE_OFFSET, - (vic->falcon.firmware.paddr + fce_bin_data_offset) + (vic->falcon.firmware.iova + fce_bin_data_offset) >> 8); err = falcon_wait_idle(&vic->falcon); @@ -157,48 +158,21 @@ static int vic_boot(struct vic *vic) return 0; } -static void *vic_falcon_alloc(struct falcon *falcon, size_t size, - dma_addr_t *iova) -{ - struct tegra_drm *tegra = falcon->data; - 
- return tegra_drm_alloc(tegra, size, iova); -} - -static void vic_falcon_free(struct falcon *falcon, size_t size, - dma_addr_t iova, void *va) -{ - struct tegra_drm *tegra = falcon->data; - - return tegra_drm_free(tegra, size, va, iova); -} - -static const struct falcon_ops vic_falcon_ops = { - .alloc = vic_falcon_alloc, - .free = vic_falcon_free -}; - static int vic_init(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct iommu_group *group = iommu_group_get(client->dev); struct drm_device *dev = dev_get_drvdata(client->parent); struct tegra_drm *tegra = dev->dev_private; struct vic *vic = to_vic(drm); int err; - if (group && tegra->domain) { - err = iommu_attach_group(tegra->domain, group); - if (err < 0) { - dev_err(vic->dev, "failed to attach to domain: %d\n", - err); - return err; - } - - vic->domain = tegra->domain; + err = host1x_client_iommu_attach(client); + if (err < 0) { + dev_err(vic->dev, "failed to attach to domain: %d\n", err); + return err; } - vic->channel = host1x_channel_request(client->dev); + vic->channel = host1x_channel_request(client); if (!vic->channel) { err = -ENOMEM; goto detach; @@ -214,6 +188,12 @@ static int vic_init(struct host1x_client *client) if (err < 0) goto free_syncpt; + /* + * Inherit the DMA parameters (such as maximum segment size) from the + * parent device. + */ + client->dev->dma_parms = client->parent->dma_parms; + return 0; free_syncpt: @@ -221,8 +201,7 @@ free_syncpt: free_channel: host1x_channel_put(vic->channel); detach: - if (group && tegra->domain) - iommu_detach_group(tegra->domain, group); + host1x_client_iommu_detach(client); return err; } @@ -230,22 +209,32 @@ detach: static int vic_exit(struct host1x_client *client) { struct tegra_drm_client *drm = host1x_to_drm_client(client); - struct iommu_group *group = iommu_group_get(client->dev); struct drm_device *dev = dev_get_drvdata(client->parent); struct tegra_drm *tegra = dev->dev_private; struct vic *vic = to_vic(drm); int err; + /* avoid a dangling pointer just in case this disappears */ + client->dev->dma_parms = NULL; + err = tegra_drm_unregister_client(tegra, drm); if (err < 0) return err; host1x_syncpt_free(client->syncpts[0]); host1x_channel_put(vic->channel); - - if (vic->domain) { - iommu_detach_group(vic->domain, group); - vic->domain = NULL; + host1x_client_iommu_detach(client); + + if (client->group) { + dma_unmap_single(vic->dev, vic->falcon.firmware.phys, + vic->falcon.firmware.size, DMA_TO_DEVICE); + tegra_drm_free(tegra, vic->falcon.firmware.size, + vic->falcon.firmware.virt, + vic->falcon.firmware.iova); + } else { + dma_free_coherent(vic->dev, vic->falcon.firmware.size, + vic->falcon.firmware.virt, + vic->falcon.firmware.iova); } return 0; @@ -258,25 +247,64 @@ static const struct host1x_client_ops vic_client_ops = { static int vic_load_firmware(struct vic *vic) { + struct host1x_client *client = &vic->client.base; + struct tegra_drm *tegra = vic->client.drm; + dma_addr_t iova; + size_t size; + void *virt; int err; - if (vic->falcon.data) + if (vic->falcon.firmware.virt) return 0; - vic->falcon.data = vic->client.drm; - err = falcon_read_firmware(&vic->falcon, vic->config->firmware); if (err < 0) - goto cleanup; + return err; + + size = vic->falcon.firmware.size; + + if (!client->group) { + virt = dma_alloc_coherent(vic->dev, size, &iova, GFP_KERNEL); + + err = dma_mapping_error(vic->dev, iova); + if (err < 0) + return err; + } else { + virt = tegra_drm_alloc(tegra, size, &iova); + } + + vic->falcon.firmware.virt = 
virt; + vic->falcon.firmware.iova = iova; err = falcon_load_firmware(&vic->falcon); if (err < 0) goto cleanup; + /* + * In this case we have received an IOVA from the shared domain, so we + * need to make sure to get the physical address so that the DMA API + * knows what memory pages to flush the cache for. + */ + if (client->group) { + dma_addr_t phys; + + phys = dma_map_single(vic->dev, virt, size, DMA_TO_DEVICE); + + err = dma_mapping_error(vic->dev, phys); + if (err < 0) + goto cleanup; + + vic->falcon.firmware.phys = phys; + } + return 0; cleanup: - vic->falcon.data = NULL; + if (!client->group) + dma_free_coherent(vic->dev, size, virt, iova); + else + tegra_drm_free(tegra, size, virt, iova); + return err; } @@ -374,6 +402,13 @@ static int vic_probe(struct platform_device *pdev) struct vic *vic; int err; + /* inherit DMA mask from host1x parent */ + err = dma_coerce_mask_and_coherent(dev, *dev->parent->dma_mask); + if (err < 0) { + dev_err(&pdev->dev, "failed to set DMA mask: %d\n", err); + return err; + } + vic = devm_kzalloc(dev, sizeof(*vic), GFP_KERNEL); if (!vic) return -ENOMEM; @@ -410,7 +445,6 @@ static int vic_probe(struct platform_device *pdev) vic->falcon.dev = dev; vic->falcon.regs = vic->regs; - vic->falcon.ops = &vic_falcon_ops; err = falcon_init(&vic->falcon); if (err < 0) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c index 43d756b7810e..51d034e095f4 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_external.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c @@ -8,6 +8,7 @@ #include <linux/of_graph.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_of.h> #include "tilcdc_drv.h" @@ -139,8 +140,8 @@ int tilcdc_attach_external_device(struct drm_device *ddev) } if (panel) { - bridge = devm_drm_panel_bridge_add(ddev->dev, panel, - DRM_MODE_CONNECTOR_DPI); + bridge = devm_drm_panel_bridge_add_typed(ddev->dev, panel, + DRM_MODE_CONNECTOR_DPI); if (IS_ERR(bridge)) { ret = PTR_ERR(bridge); goto err_encoder_cleanup; diff --git a/drivers/gpu/drm/tilcdc/tilcdc_plane.c b/drivers/gpu/drm/tilcdc/tilcdc_plane.c index 3abb9641f212..e2090020b3a0 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_plane.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_plane.c @@ -11,7 +11,7 @@ #include "tilcdc_drv.h" -static struct drm_plane_funcs tilcdc_plane_funcs = { +static const struct drm_plane_funcs tilcdc_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, .destroy = drm_plane_cleanup, diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c index 03d0e2df6774..94fb1f593564 100644 --- a/drivers/gpu/drm/tiny/gm12u320.c +++ b/drivers/gpu/drm/tiny/gm12u320.c @@ -649,7 +649,7 @@ static void gm12u320_driver_release(struct drm_device *dev) kfree(gm12u320); } -DEFINE_DRM_GEM_SHMEM_FOPS(gm12u320_fops); +DEFINE_DRM_GEM_FOPS(gm12u320_fops); static struct drm_driver gm12u320_drm_driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_ATOMIC, diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile index 01fc670ce7a2..caea2a099496 100644 --- a/drivers/gpu/drm/ttm/Makefile +++ b/drivers/gpu/drm/ttm/Makefile @@ -4,8 +4,8 @@ ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \ ttm_bo_util.o ttm_bo_vm.o ttm_module.o \ - ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o \ - ttm_page_alloc_dma.o + ttm_execbuf_util.o ttm_page_alloc.o ttm_bo_manager.o ttm-$(CONFIG_AGP) += ttm_agp_backend.o +ttm-$(CONFIG_DRM_TTM_DMA_PAGE_POOL) += ttm_page_alloc_dma.o 
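A short note on the Makefile change above, before the diff continues: ttm_page_alloc_dma.o moves off the unconditional object list and is now built only when CONFIG_DRM_TTM_DMA_PAGE_POOL is set, which is why the in-file #if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) guard around ttm_page_alloc_dma.c is dropped further down in this diff. Callers that must compile either way typically rely on header stubs; a hedged sketch of that pattern, using a function visible in this diff (the real header, include/drm/ttm/ttm_page_alloc.h, may arrange this differently):

/* assumes <linux/seq_file.h> and the usual TTM headers are included */
#ifdef CONFIG_DRM_TTM_DMA_PAGE_POOL
/* real implementation is compiled in from ttm_page_alloc_dma.c */
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
#else
/* fallback when the DMA page pool is not built */
static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	return 0;
}
#endif
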
obj-$(CONFIG_DRM_TTM) += ttm.o diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index ea4d59eb8966..6050dc846894 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -51,7 +51,7 @@ struct ttm_agp_backend { static int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm); - struct page *dummy_read_page = ttm->bdev->glob->dummy_read_page; + struct page *dummy_read_page = ttm_bo_glob.dummy_read_page; struct drm_mm_node *node = bo_mem->mm_node; struct agp_memory *mem; int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 98819462f025..8d91b0428af1 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -51,6 +51,7 @@ static void ttm_bo_global_kobj_release(struct kobject *kobj); DEFINE_MUTEX(ttm_global_mutex); unsigned ttm_bo_glob_use_count; struct ttm_bo_global ttm_bo_glob; +EXPORT_SYMBOL(ttm_bo_glob); static struct attribute ttm_bo_count = { .name = "bo_count", @@ -148,23 +149,21 @@ static void ttm_bo_release_list(struct kref *list_kref) { struct ttm_buffer_object *bo = container_of(list_kref, struct ttm_buffer_object, list_kref); - struct ttm_bo_device *bdev = bo->bdev; size_t acc_size = bo->acc_size; BUG_ON(kref_read(&bo->list_kref)); BUG_ON(kref_read(&bo->kref)); - BUG_ON(atomic_read(&bo->cpu_writers)); BUG_ON(bo->mem.mm_node != NULL); BUG_ON(!list_empty(&bo->lru)); BUG_ON(!list_empty(&bo->ddestroy)); ttm_tt_destroy(bo->ttm); - atomic_dec(&bo->bdev->glob->bo_count); + atomic_dec(&ttm_bo_glob.bo_count); dma_fence_put(bo->moving); if (!ttm_bo_uses_embedded_gem_object(bo)) dma_resv_fini(&bo->base._resv); mutex_destroy(&bo->wu_mutex); bo->destroy(bo); - ttm_mem_global_free(bdev->glob->mem_glob, acc_size); + ttm_mem_global_free(&ttm_mem_glob, acc_size); } static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, @@ -188,23 +187,17 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo, if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm && !(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG | TTM_PAGE_FLAG_SWAPPED))) { - list_add_tail(&bo->swap, &bdev->glob->swap_lru[bo->priority]); + list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]); kref_get(&bo->list_kref); } } -void ttm_bo_add_to_lru(struct ttm_buffer_object *bo) -{ - ttm_bo_add_mem_to_lru(bo, &bo->mem); -} -EXPORT_SYMBOL(ttm_bo_add_to_lru); - static void ttm_bo_ref_bug(struct kref *list_kref) { BUG(); } -void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) +static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; bool notify = false; @@ -224,16 +217,6 @@ void ttm_bo_del_from_lru(struct ttm_buffer_object *bo) bdev->driver->del_from_lru_notify(bo); } -void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo) -{ - struct ttm_bo_global *glob = bo->bdev->glob; - - spin_lock(&glob->lru_lock); - ttm_bo_del_from_lru(bo); - spin_unlock(&glob->lru_lock); -} -EXPORT_SYMBOL(ttm_bo_del_sub_from_lru); - static void ttm_bo_bulk_move_set_pos(struct ttm_lru_bulk_move_pos *pos, struct ttm_buffer_object *bo) { @@ -248,7 +231,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo, dma_resv_assert_held(bo->base.resv); ttm_bo_del_from_lru(bo); - ttm_bo_add_to_lru(bo); + ttm_bo_add_mem_to_lru(bo, &bo->mem); if (bulk && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { switch (bo->mem.mem_type) { @@ -311,7 
+294,7 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk) dma_resv_assert_held(pos->first->base.resv); dma_resv_assert_held(pos->last->base.resv); - lru = &pos->first->bdev->glob->swap_lru[i]; + lru = &ttm_bo_glob.swap_lru[i]; list_bulk_move_tail(lru, &pos->first->swap, &pos->last->swap); } } @@ -475,7 +458,6 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo) static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; - struct ttm_bo_global *glob = bdev->glob; int ret; ret = ttm_bo_individualize_resv(bo); @@ -485,16 +467,16 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) */ dma_resv_wait_timeout_rcu(bo->base.resv, true, false, 30 * HZ); - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); goto error; } - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY; if (!ret) { if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) { ttm_bo_del_from_lru(bo); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); if (bo->base.resv != &bo->base._resv) dma_resv_unlock(&bo->base._resv); @@ -512,7 +494,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) */ if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) { bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT; - ttm_bo_add_to_lru(bo); + ttm_bo_move_to_lru_tail(bo, NULL); } dma_resv_unlock(bo->base.resv); @@ -523,7 +505,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo) error: kref_get(&bo->list_kref); list_add_tail(&bo->ddestroy, &bdev->ddestroy); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); schedule_delayed_work(&bdev->wq, ((HZ / 100) < 1) ? 1 : HZ / 100); @@ -546,7 +528,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool interruptible, bool no_wait_gpu, bool unlock_resv) { - struct ttm_bo_global *glob = bo->bdev->glob; struct dma_resv *resv; int ret; @@ -565,7 +546,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, if (unlock_resv) dma_resv_unlock(bo->base.resv); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); lret = dma_resv_wait_timeout_rcu(resv, true, interruptible, @@ -576,7 +557,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, else if (lret == 0) return -EBUSY; - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); if (unlock_resv && !dma_resv_trylock(bo->base.resv)) { /* * We raced, and lost, someone else holds the reservation now, @@ -586,7 +567,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, * delayed destruction would succeed, so just return success * here. 
*/ - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); return 0; } ret = 0; @@ -595,7 +576,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, if (ret || unlikely(list_empty(&bo->ddestroy))) { if (unlock_resv) dma_resv_unlock(bo->base.resv); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); return ret; } @@ -603,7 +584,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, list_del_init(&bo->ddestroy); kref_put(&bo->list_kref, ttm_bo_ref_bug); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); ttm_bo_cleanup_memtype_use(bo); if (unlock_resv) @@ -618,7 +599,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, */ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all) { - struct ttm_bo_global *glob = bdev->glob; + struct ttm_bo_global *glob = &ttm_bo_glob; struct list_head removed; bool empty; @@ -676,7 +657,7 @@ static void ttm_bo_release(struct kref *kref) if (bo->bdev->driver->release_notify) bo->bdev->driver->release_notify(bo); - drm_vma_offset_remove(&bdev->vma_manager, &bo->base.vma_node); + drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node); ttm_mem_io_lock(man, false); ttm_mem_io_free_vm(bo); ttm_mem_io_unlock(man); @@ -842,13 +823,12 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, struct ww_acquire_ctx *ticket) { struct ttm_buffer_object *bo = NULL, *busy_bo = NULL; - struct ttm_bo_global *glob = bdev->glob; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; bool locked = false; unsigned i; int ret; - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) { list_for_each_entry(bo, &man->lru[i], lru) { bool busy; @@ -880,7 +860,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, if (!bo) { if (busy_bo) kref_get(&busy_bo->list_kref); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket); if (busy_bo) kref_put(&busy_bo->list_kref, ttm_bo_release_list); @@ -896,17 +876,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev, return ret; } - ttm_bo_del_from_lru(bo); - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); ret = ttm_bo_evict(bo, ctx); - if (locked) { + if (locked) ttm_bo_unreserve(bo); - } else { - spin_lock(&glob->lru_lock); - ttm_bo_add_to_lru(bo); - spin_unlock(&glob->lru_lock); - } kref_put(&bo->list_kref, ttm_bo_release_list); return ret; @@ -926,7 +900,8 @@ EXPORT_SYMBOL(ttm_bo_mem_put); */ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem) + struct ttm_mem_reg *mem, + bool no_wait_gpu) { struct dma_fence *fence; int ret; @@ -935,19 +910,22 @@ static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, fence = dma_fence_get(man->move); spin_unlock(&man->move_lock); - if (fence) { - dma_resv_add_shared_fence(bo->base.resv, fence); + if (!fence) + return 0; - ret = dma_resv_reserve_shared(bo->base.resv, 1); - if (unlikely(ret)) { - dma_fence_put(fence); - return ret; - } + if (no_wait_gpu) + return -EBUSY; - dma_fence_put(bo->moving); - bo->moving = fence; + dma_resv_add_shared_fence(bo->base.resv, fence); + + ret = dma_resv_reserve_shared(bo->base.resv, 1); + if (unlikely(ret)) { + dma_fence_put(fence); + return ret; } + dma_fence_put(bo->moving); + bo->moving = fence; return 0; } @@ -978,7 +956,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ret; } while (1); - return 
ttm_bo_add_move_fence(bo, man, mem); + return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); } static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, @@ -1068,12 +1046,10 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, mem->mem_type = mem_type; mem->placement = cur_flags; - if (bo->mem.mem_type < mem_type && !list_empty(&bo->lru)) { - spin_lock(&bo->bdev->glob->lru_lock); - ttm_bo_del_from_lru(bo); - ttm_bo_add_mem_to_lru(bo, mem); - spin_unlock(&bo->bdev->glob->lru_lock); - } + spin_lock(&ttm_bo_glob.lru_lock); + ttm_bo_del_from_lru(bo); + ttm_bo_add_mem_to_lru(bo, mem); + spin_unlock(&ttm_bo_glob.lru_lock); return 0; } @@ -1120,14 +1096,18 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, if (unlikely(ret)) goto error; - if (mem->mm_node) { - ret = ttm_bo_add_move_fence(bo, man, mem); - if (unlikely(ret)) { - (*man->func->put_node)(man, mem); - goto error; - } - return 0; + if (!mem->mm_node) + continue; + + ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); + if (unlikely(ret)) { + (*man->func->put_node)(man, mem); + if (ret == -EBUSY) + continue; + + goto error; } + return 0; } for (i = 0; i < placement->num_busy_placement; ++i) { @@ -1160,9 +1140,9 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, error: if (bo->mem.mem_type == TTM_PL_SYSTEM && !list_empty(&bo->lru)) { - spin_lock(&bo->bdev->glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); ttm_bo_move_to_lru_tail(bo, NULL); - spin_unlock(&bo->bdev->glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); } return ret; @@ -1286,9 +1266,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)) { + struct ttm_mem_global *mem_glob = &ttm_mem_glob; int ret = 0; unsigned long num_pages; - struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; bool locked; ret = ttm_mem_global_alloc(mem_glob, acc_size, ctx); @@ -1315,7 +1295,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, kref_init(&bo->kref); kref_init(&bo->list_kref); - atomic_set(&bo->cpu_writers, 0); INIT_LIST_HEAD(&bo->lru); INIT_LIST_HEAD(&bo->ddestroy); INIT_LIST_HEAD(&bo->swap); @@ -1349,7 +1328,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, dma_resv_init(&bo->base._resv); drm_vma_node_reset(&bo->base.vma_node); } - atomic_inc(&bo->bdev->glob->bo_count); + atomic_inc(&ttm_bo_glob.bo_count); /* * For ttm_bo_type_device buffers, allocate @@ -1357,7 +1336,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, */ if (bo->type == ttm_bo_type_device || bo->type == ttm_bo_type_sg) - ret = drm_vma_offset_add(&bdev->vma_manager, &bo->base.vma_node, + ret = drm_vma_offset_add(bdev->vma_manager, &bo->base.vma_node, bo->mem.num_pages); /* passed reservation objects should already be locked, @@ -1379,11 +1358,9 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, return ret; } - if (resv && !(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) { - spin_lock(&bdev->glob->lru_lock); - ttm_bo_add_to_lru(bo); - spin_unlock(&bdev->glob->lru_lock); - } + spin_lock(&ttm_bo_glob.lru_lock); + ttm_bo_move_to_lru_tail(bo, NULL); + spin_unlock(&ttm_bo_glob.lru_lock); return ret; } @@ -1481,7 +1458,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, .flags = TTM_OPT_FLAG_FORCE_ALLOC }; struct ttm_mem_type_manager *man = &bdev->man[mem_type]; - struct ttm_bo_global *glob = bdev->glob; + struct ttm_bo_global *glob = &ttm_bo_glob; struct dma_fence *fence; int ret; unsigned i; @@ -1650,8 +1627,6 @@ static int ttm_bo_global_init(void) goto out; 
spin_lock_init(&glob->lru_lock); - glob->mem_glob = &ttm_mem_glob; - glob->mem_glob->bo_glob = glob; glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32); if (unlikely(glob->dummy_read_page == NULL)) { @@ -1675,10 +1650,10 @@ out: int ttm_bo_device_release(struct ttm_bo_device *bdev) { + struct ttm_bo_global *glob = &ttm_bo_glob; int ret = 0; unsigned i = TTM_NUM_MEM_TYPES; struct ttm_mem_type_manager *man; - struct ttm_bo_global *glob = bdev->glob; while (i--) { man = &bdev->man[i]; @@ -1708,8 +1683,6 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev) pr_debug("Swap list %d was clean\n", i); spin_unlock(&glob->lru_lock); - drm_vma_offset_manager_destroy(&bdev->vma_manager); - if (!ret) ttm_bo_global_release(); @@ -1720,11 +1693,15 @@ EXPORT_SYMBOL(ttm_bo_device_release); int ttm_bo_device_init(struct ttm_bo_device *bdev, struct ttm_bo_driver *driver, struct address_space *mapping, + struct drm_vma_offset_manager *vma_manager, bool need_dma32) { struct ttm_bo_global *glob = &ttm_bo_glob; int ret; + if (WARN_ON(vma_manager == NULL)) + return -EINVAL; + ret = ttm_bo_global_init(); if (ret) return ret; @@ -1741,13 +1718,10 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev, if (unlikely(ret != 0)) goto out_no_sys; - drm_vma_offset_manager_init(&bdev->vma_manager, - DRM_FILE_PAGE_OFFSET_START, - DRM_FILE_PAGE_OFFSET_SIZE); + bdev->vma_manager = vma_manager; INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue); INIT_LIST_HEAD(&bdev->ddestroy); bdev->dev_mapping = mapping; - bdev->glob = glob; bdev->need_dma32 = need_dma32; mutex_lock(&ttm_global_mutex); list_add_tail(&bdev->device_list, &glob->device_list); @@ -1827,31 +1801,6 @@ int ttm_bo_wait(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_wait); -int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait) -{ - int ret = 0; - - /* - * Using ttm_bo_reserve makes sure the lru lists are updated. - */ - - ret = ttm_bo_reserve(bo, true, no_wait, NULL); - if (unlikely(ret != 0)) - return ret; - ret = ttm_bo_wait(bo, true, no_wait); - if (likely(ret == 0)) - atomic_inc(&bo->cpu_writers); - ttm_bo_unreserve(bo); - return ret; -} -EXPORT_SYMBOL(ttm_bo_synccpu_write_grab); - -void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo) -{ - atomic_dec(&bo->cpu_writers); -} -EXPORT_SYMBOL(ttm_bo_synccpu_write_release); - /** * A buffer object shrink method that tries to swap out the first * buffer object on the bo_global::swap_lru list. 
@@ -1951,8 +1900,7 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev) .no_wait_gpu = false }; - while (ttm_bo_swapout(bdev->glob, &ctx) == 0) - ; + while (ttm_bo_swapout(&ttm_bo_glob, &ctx) == 0); } EXPORT_SYMBOL(ttm_bo_swapout_all); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index fe81c565e7ef..6b0883a1776e 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -102,7 +102,6 @@ int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible) mutex_lock(&man->io_reserve_mutex); return 0; } -EXPORT_SYMBOL(ttm_mem_io_lock); void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) { @@ -111,7 +110,6 @@ void ttm_mem_io_unlock(struct ttm_mem_type_manager *man) mutex_unlock(&man->io_reserve_mutex); } -EXPORT_SYMBOL(ttm_mem_io_unlock); static int ttm_mem_io_evict(struct ttm_mem_type_manager *man) { @@ -153,7 +151,6 @@ retry: } return ret; } -EXPORT_SYMBOL(ttm_mem_io_reserve); void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) @@ -169,7 +166,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, bdev->driver->io_mem_free(bdev, mem); } -EXPORT_SYMBOL(ttm_mem_io_free); int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo) { @@ -503,7 +499,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, * TODO: Explicit member copy would probably be better here. */ - atomic_inc(&bo->bdev->glob->bo_count); + atomic_inc(&ttm_bo_glob.bo_count); INIT_LIST_HEAD(&fbo->base.ddestroy); INIT_LIST_HEAD(&fbo->base.lru); INIT_LIST_HEAD(&fbo->base.swap); @@ -511,15 +507,16 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, mutex_init(&fbo->base.wu_mutex); fbo->base.moving = NULL; drm_vma_node_reset(&fbo->base.base.vma_node); - atomic_set(&fbo->base.cpu_writers, 0); kref_init(&fbo->base.list_kref); kref_init(&fbo->base.kref); fbo->base.destroy = &ttm_transfered_destroy; fbo->base.acc_size = 0; - fbo->base.base.resv = &fbo->base.base._resv; - dma_resv_init(fbo->base.base.resv); - ret = dma_resv_trylock(fbo->base.base.resv); + if (bo->base.resv == &bo->base._resv) + fbo->base.base.resv = &fbo->base.base._resv; + + dma_resv_init(&fbo->base.base._resv); + ret = dma_resv_trylock(&fbo->base.base._resv); WARN_ON(!ret); *new_obj = &fbo->base; @@ -716,7 +713,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, if (ret) return ret; - dma_resv_add_excl_fence(ghost_obj->base.resv, fence); + dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); /** * If we're not moving to fixed memory, the TTM object @@ -729,7 +726,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, else bo->ttm = NULL; - ttm_bo_unreserve(ghost_obj); + dma_resv_unlock(&ghost_obj->base._resv); ttm_bo_put(ghost_obj); } @@ -772,7 +769,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, if (ret) return ret; - dma_resv_add_excl_fence(ghost_obj->base.resv, fence); + dma_resv_add_excl_fence(&ghost_obj->base._resv, fence); /** * If we're not moving to fixed memory, the TTM object @@ -785,7 +782,7 @@ int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, else bo->ttm = NULL; - ttm_bo_unreserve(ghost_obj); + dma_resv_unlock(&ghost_obj->base._resv); ttm_bo_put(ghost_obj); } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) { @@ -841,7 +838,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) if (ret) return ret; - ret = dma_resv_copy_fences(ghost->base.resv, bo->base.resv); + ret = dma_resv_copy_fences(&ghost->base._resv, bo->base.resv); /* Last resort, wait for the BO to be idle when we are 
OOM */ if (ret) ttm_bo_wait(bo, false, false); @@ -850,7 +847,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo) bo->mem.mem_type = TTM_PL_SYSTEM; bo->ttm = NULL; - ttm_bo_unreserve(ghost); + dma_resv_unlock(&ghost->base._resv); ttm_bo_put(ghost); return 0; diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 46dc3de7e81b..4b34a278d65b 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -177,9 +177,9 @@ static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf) } if (bo->moving != moving) { - spin_lock(&bdev->glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); ttm_bo_move_to_lru_tail(bo, NULL); - spin_unlock(&bdev->glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); } dma_fence_put(moving); } @@ -407,16 +407,16 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, struct drm_vma_offset_node *node; struct ttm_buffer_object *bo = NULL; - drm_vma_offset_lock_lookup(&bdev->vma_manager); + drm_vma_offset_lock_lookup(bdev->vma_manager); - node = drm_vma_offset_lookup_locked(&bdev->vma_manager, offset, pages); + node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages); if (likely(node)) { bo = container_of(node, struct ttm_buffer_object, base.vma_node); bo = ttm_bo_get_unless_zero(bo); } - drm_vma_offset_unlock_lookup(&bdev->vma_manager); + drm_vma_offset_unlock_lookup(bdev->vma_manager); if (!bo) pr_err("Could not find buffer object to map\n"); @@ -424,6 +424,28 @@ static struct ttm_buffer_object *ttm_bo_vm_lookup(struct ttm_bo_device *bdev, return bo; } +static void ttm_bo_mmap_vma_setup(struct ttm_buffer_object *bo, struct vm_area_struct *vma) +{ + vma->vm_ops = &ttm_bo_vm_ops; + + /* + * Note: We're transferring the bo reference to + * vma->vm_private_data here. + */ + + vma->vm_private_data = bo; + + /* + * We'd like to use VM_PFNMAP on shared mappings, where + * (vma->vm_flags & VM_SHARED) != 0, for performance reasons, + * but for some reason VM_PFNMAP + x86 PAT + write-combine is very + * bad for performance. Until that has been sorted out, use + * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719 + */ + vma->vm_flags |= VM_MIXEDMAP; + vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; +} + int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, struct ttm_bo_device *bdev) { @@ -447,24 +469,7 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, if (unlikely(ret != 0)) goto out_unref; - vma->vm_ops = &ttm_bo_vm_ops; - - /* - * Note: We're transferring the bo reference to - * vma->vm_private_data here. - */ - - vma->vm_private_data = bo; - - /* - * We'd like to use VM_PFNMAP on shared mappings, where - * (vma->vm_flags & VM_SHARED) != 0, for performance reasons, - * but for some reason VM_PFNMAP + x86 PAT + write-combine is very - * bad for performance. Until that has been sorted out, use - * VM_MIXEDMAP on all mappings. 
See freedesktop.org bug #75719 - */ - vma->vm_flags |= VM_MIXEDMAP; - vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; + ttm_bo_mmap_vma_setup(bo, vma); return 0; out_unref: ttm_bo_put(bo); @@ -472,17 +477,17 @@ out_unref: } EXPORT_SYMBOL(ttm_bo_mmap); -int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) +int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo) { - if (vma->vm_pgoff != 0) - return -EACCES; - ttm_bo_get(bo); - vma->vm_ops = &ttm_bo_vm_ops; - vma->vm_private_data = bo; - vma->vm_flags |= VM_MIXEDMAP; - vma->vm_flags |= VM_IO | VM_DONTEXPAND; + /* + * FIXME: &drm_gem_object_funcs.mmap is called with the fake offset + * removed. Add it back here until the rest of TTM works without it. + */ + vma->vm_pgoff += drm_vma_node_start(&bo->base.vma_node); + + ttm_bo_mmap_vma_setup(bo, vma); return 0; } -EXPORT_SYMBOL(ttm_fbdev_mmap); +EXPORT_SYMBOL(ttm_bo_mmap_obj); diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c index 131dae8f4170..1797f04c0534 100644 --- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c +++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c @@ -43,37 +43,22 @@ static void ttm_eu_backoff_reservation_reverse(struct list_head *list, } } -static void ttm_eu_del_from_lru_locked(struct list_head *list) -{ - struct ttm_validate_buffer *entry; - - list_for_each_entry(entry, list, head) { - struct ttm_buffer_object *bo = entry->bo; - ttm_bo_del_from_lru(bo); - } -} - void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket, struct list_head *list) { struct ttm_validate_buffer *entry; - struct ttm_bo_global *glob; if (list_empty(list)) return; - entry = list_first_entry(list, struct ttm_validate_buffer, head); - glob = entry->bo->bdev->glob; - - spin_lock(&glob->lru_lock); + spin_lock(&ttm_bo_glob.lru_lock); list_for_each_entry(entry, list, head) { struct ttm_buffer_object *bo = entry->bo; - if (list_empty(&bo->lru)) - ttm_bo_add_to_lru(bo); + ttm_bo_move_to_lru_tail(bo, NULL); dma_resv_unlock(bo->base.resv); } - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); if (ticket) ww_acquire_fini(ticket); @@ -94,18 +79,14 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation); int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, struct list_head *list, bool intr, - struct list_head *dups, bool del_lru) + struct list_head *dups) { - struct ttm_bo_global *glob; struct ttm_validate_buffer *entry; int ret; if (list_empty(list)) return 0; - entry = list_first_entry(list, struct ttm_validate_buffer, head); - glob = entry->bo->bdev->glob; - if (ticket) ww_acquire_init(ticket, &reservation_ww_class); @@ -113,12 +94,7 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, struct ttm_buffer_object *bo = entry->bo; ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket); - if (!ret && unlikely(atomic_read(&bo->cpu_writers) > 0)) { - dma_resv_unlock(bo->base.resv); - - ret = -EBUSY; - - } else if (ret == -EALREADY && dups) { + if (ret == -EALREADY && dups) { struct ttm_validate_buffer *safe = entry; entry = list_prev_entry(entry, head); list_del(&safe->head); @@ -173,11 +149,6 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket, list_add(&entry->head, list); } - if (del_lru) { - spin_lock(&glob->lru_lock); - ttm_eu_del_from_lru_locked(list); - spin_unlock(&glob->lru_lock); - } return 0; } EXPORT_SYMBOL(ttm_eu_reserve_buffers); @@ -187,30 +158,22 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket, struct dma_fence *fence) { struct ttm_validate_buffer *entry; - struct 
ttm_buffer_object *bo; - struct ttm_bo_global *glob; if (list_empty(list)) return; - bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo; - glob = bo->bdev->glob; - - spin_lock(&glob->lru_lock); - + spin_lock(&ttm_bo_glob.lru_lock); list_for_each_entry(entry, list, head) { - bo = entry->bo; + struct ttm_buffer_object *bo = entry->bo; + if (entry->num_shared) dma_resv_add_shared_fence(bo->base.resv, fence); else dma_resv_add_excl_fence(bo->base.resv, fence); - if (list_empty(&bo->lru)) - ttm_bo_add_to_lru(bo); - else - ttm_bo_move_to_lru_tail(bo, NULL); + ttm_bo_move_to_lru_tail(bo, NULL); dma_resv_unlock(bo->base.resv); } - spin_unlock(&glob->lru_lock); + spin_unlock(&ttm_bo_glob.lru_lock); if (ticket) ww_acquire_fini(ticket); } diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c index 8617958b7ae6..acd63b70d814 100644 --- a/drivers/gpu/drm/ttm/ttm_memory.c +++ b/drivers/gpu/drm/ttm/ttm_memory.c @@ -275,7 +275,7 @@ static void ttm_shrink(struct ttm_mem_global *glob, bool from_wq, while (ttm_zones_above_swap_target(glob, from_wq, extra)) { spin_unlock(&glob->lock); - ret = ttm_bo_swapout(glob->bo_glob, ctx); + ret = ttm_bo_swapout(&ttm_bo_glob, ctx); spin_lock(&glob->lock); if (unlikely(ret != 0)) break; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 627f8dc91d0e..b40a4678c296 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -1028,7 +1028,7 @@ void ttm_page_alloc_fini(void) static void ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update) { - struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; + struct ttm_mem_global *mem_glob = &ttm_mem_glob; unsigned i; if (mem_count_update == 0) @@ -1049,7 +1049,7 @@ put_pages: int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { - struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; + struct ttm_mem_global *mem_glob = &ttm_mem_glob; unsigned i; int ret; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 7d78e6deac89..bf876faea592 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -33,7 +33,6 @@ * when freed). 
*/ -#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU) #define pr_fmt(fmt) "[TTM] " fmt #include <linux/dma-mapping.h> @@ -886,8 +885,8 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge) int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, struct ttm_operation_ctx *ctx) { + struct ttm_mem_global *mem_glob = &ttm_mem_glob; struct ttm_tt *ttm = &ttm_dma->ttm; - struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; unsigned long num_pages = ttm->num_pages; struct dma_pool *pool; struct dma_page *d_page; @@ -991,8 +990,8 @@ EXPORT_SYMBOL_GPL(ttm_dma_populate); /* Put all pages in pages list to correct pool to wait for reuse */ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) { + struct ttm_mem_global *mem_glob = &ttm_mem_glob; struct ttm_tt *ttm = &ttm_dma->ttm; - struct ttm_mem_global *mem_glob = ttm->bdev->glob->mem_glob; struct dma_pool *pool; struct dma_page *d_page, *next; enum pool_type type; @@ -1238,5 +1237,3 @@ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) return 0; } EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs); - -#endif diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index 416f24823c0a..954b09c948eb 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -80,8 +80,8 @@ static int tve200_modeset_init(struct drm_device *dev) if (ret && ret != -ENODEV) return ret; if (panel) { - bridge = drm_panel_bridge_add(panel, - DRM_MODE_CONNECTOR_Unknown); + bridge = drm_panel_bridge_add_typed(panel, + DRM_MODE_CONNECTOR_Unknown); if (IS_ERR(bridge)) { ret = PTR_ERR(bridge); goto out_bridge; diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c index ddb61a60c610..b4ae3e89a7b4 100644 --- a/drivers/gpu/drm/udl/udl_connector.c +++ b/drivers/gpu/drm/udl/udl_connector.c @@ -90,13 +90,6 @@ udl_detect(struct drm_connector *connector, bool force) return connector_status_connected; } -static struct drm_encoder* -udl_best_single_encoder(struct drm_connector *connector) -{ - int enc_id = connector->encoder_ids[0]; - return drm_encoder_find(connector->dev, NULL, enc_id); -} - static int udl_connector_set_property(struct drm_connector *connector, struct drm_property *property, uint64_t val) @@ -120,7 +113,6 @@ static void udl_connector_destroy(struct drm_connector *connector) static const struct drm_connector_helper_funcs udl_connector_helper_funcs = { .get_modes = udl_get_modes, .mode_valid = udl_mode_valid, - .best_encoder = udl_best_single_encoder, }; static const struct drm_connector_funcs udl_connector_funcs = { diff --git a/drivers/gpu/drm/v3d/v3d_bo.c b/drivers/gpu/drm/v3d/v3d_bo.c index a22b75a3a533..edd299ab53d8 100644 --- a/drivers/gpu/drm/v3d/v3d_bo.c +++ b/drivers/gpu/drm/v3d/v3d_bo.c @@ -58,7 +58,7 @@ static const struct drm_gem_object_funcs v3d_gem_funcs = { .get_sg_table = drm_gem_shmem_get_sg_table, .vmap = drm_gem_shmem_vmap, .vunmap = drm_gem_shmem_vunmap, - .vm_ops = &drm_gem_shmem_vm_ops, + .mmap = drm_gem_shmem_mmap, }; /* gem_create_object function for allocating a BO struct and doing diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 3506ae2723ae..1a07462b4528 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -126,6 +126,9 @@ static int v3d_get_param_ioctl(struct drm_device *dev, void *data, case DRM_V3D_PARAM_SUPPORTS_CSD: args->value = v3d_has_csd(v3d); return 0; + case DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH: + 
args->value = 1; + return 0; default: DRM_DEBUG("Unknown parameter %d\n", args->param); return -EINVAL; @@ -169,7 +172,7 @@ v3d_postclose(struct drm_device *dev, struct drm_file *file) kfree(v3d_priv); } -DEFINE_DRM_GEM_SHMEM_FOPS(v3d_drm_fops); +DEFINE_DRM_GEM_FOPS(v3d_drm_fops); /* DRM_AUTH is required on SUBMIT_CL for now, while we don't have GMP * protection between clients. Note that render nodes would be be diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 19c092d75266..549dde83408b 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -530,13 +530,16 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, struct drm_v3d_submit_cl *args = data; struct v3d_bin_job *bin = NULL; struct v3d_render_job *render; + struct v3d_job *clean_job = NULL; + struct v3d_job *last_job; struct ww_acquire_ctx acquire_ctx; int ret = 0; trace_v3d_submit_cl_ioctl(&v3d->drm, args->rcl_start, args->rcl_end); - if (args->pad != 0) { - DRM_INFO("pad must be zero: %d\n", args->pad); + if (args->flags != 0 && + args->flags != DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { + DRM_INFO("invalid flags: %d\n", args->flags); return -EINVAL; } @@ -565,6 +568,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, ret = v3d_job_init(v3d, file_priv, &bin->base, v3d_job_free, args->in_sync_bcl); if (ret) { + kfree(bin); v3d_job_put(&render->base); kfree(bin); return ret; @@ -578,12 +582,31 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, bin->render = render; } - ret = v3d_lookup_bos(dev, file_priv, &render->base, + if (args->flags & DRM_V3D_SUBMIT_CL_FLUSH_CACHE) { + clean_job = kcalloc(1, sizeof(*clean_job), GFP_KERNEL); + if (!clean_job) { + ret = -ENOMEM; + goto fail; + } + + ret = v3d_job_init(v3d, file_priv, clean_job, v3d_job_free, 0); + if (ret) { + kfree(clean_job); + clean_job = NULL; + goto fail; + } + + last_job = clean_job; + } else { + last_job = &render->base; + } + + ret = v3d_lookup_bos(dev, file_priv, last_job, args->bo_handles, args->bo_handle_count); if (ret) goto fail; - ret = v3d_lock_bo_reservations(&render->base, &acquire_ctx); + ret = v3d_lock_bo_reservations(last_job, &acquire_ctx); if (ret) goto fail; @@ -602,28 +625,44 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, ret = v3d_push_job(v3d_priv, &render->base, V3D_RENDER); if (ret) goto fail_unreserve; + + if (clean_job) { + struct dma_fence *render_fence = + dma_fence_get(render->base.done_fence); + ret = drm_gem_fence_array_add(&clean_job->deps, render_fence); + if (ret) + goto fail_unreserve; + ret = v3d_push_job(v3d_priv, clean_job, V3D_CACHE_CLEAN); + if (ret) + goto fail_unreserve; + } + mutex_unlock(&v3d->sched_lock); v3d_attach_fences_and_unlock_reservation(file_priv, - &render->base, + last_job, &acquire_ctx, args->out_sync, - render->base.done_fence); + last_job->done_fence); if (bin) v3d_job_put(&bin->base); v3d_job_put(&render->base); + if (clean_job) + v3d_job_put(clean_job); return 0; fail_unreserve: mutex_unlock(&v3d->sched_lock); - drm_gem_unlock_reservations(render->base.bo, - render->base.bo_count, &acquire_ctx); + drm_gem_unlock_reservations(last_job->bo, + last_job->bo_count, &acquire_ctx); fail: if (bin) v3d_job_put(&bin->base); v3d_job_put(&render->base); + if (clean_job) + v3d_job_put(clean_job); return ret; } diff --git a/drivers/gpu/drm/vboxvideo/Kconfig b/drivers/gpu/drm/vboxvideo/Kconfig index 56ba510f21a2..45fe135d6e43 100644 --- a/drivers/gpu/drm/vboxvideo/Kconfig +++ b/drivers/gpu/drm/vboxvideo/Kconfig @@ -4,6 +4,8 @@ config DRM_VBOXVIDEO 
depends on DRM && X86 && PCI select DRM_KMS_HELPER select DRM_VRAM_HELPER + select DRM_TTM + select DRM_TTM_HELPER select GENERIC_ALLOCATOR help This is a KMS driver for the virtual Graphics Card used in diff --git a/drivers/gpu/drm/vboxvideo/Makefile b/drivers/gpu/drm/vboxvideo/Makefile index 55d798c76b21..f2e968b5ffa6 100644 --- a/drivers/gpu/drm/vboxvideo/Makefile +++ b/drivers/gpu/drm/vboxvideo/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \ - vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \ + vbox_drv.o vbox_hgsmi.o vbox_irq.o vbox_main.o \ vbox_mode.o vbox_ttm.o obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index 862db495d111..8512d970a09f 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -14,6 +14,7 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_drv.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_file.h> #include <drm/drm_ioctl.h> @@ -32,10 +33,6 @@ static const struct pci_device_id pciidlist[] = { }; MODULE_DEVICE_TABLE(pci, pciidlist); -static const struct drm_fb_helper_funcs vbox_fb_helper_funcs = { - .fb_probe = vboxfb_create, -}; - static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct vbox_private *vbox; @@ -79,20 +76,16 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (ret) goto err_mode_fini; - ret = drm_fb_helper_fbdev_setup(&vbox->ddev, &vbox->fb_helper, - &vbox_fb_helper_funcs, 32, - vbox->num_crtcs); + ret = drm_fbdev_generic_setup(&vbox->ddev, 32); if (ret) goto err_irq_fini; ret = drm_dev_register(&vbox->ddev, 0); if (ret) - goto err_fbdev_fini; + goto err_irq_fini; return 0; -err_fbdev_fini: - vbox_fbdev_fini(vbox); err_irq_fini: vbox_irq_fini(vbox); err_mode_fini: @@ -113,7 +106,6 @@ static void vbox_pci_remove(struct pci_dev *pdev) struct vbox_private *vbox = pci_get_drvdata(pdev); drm_dev_unregister(&vbox->ddev); - vbox_fbdev_fini(vbox); vbox_irq_fini(vbox); vbox_mode_fini(vbox); vbox_mm_fini(vbox); @@ -189,10 +181,7 @@ static struct pci_driver vbox_pci_driver = { #endif }; -static const struct file_operations vbox_fops = { - .owner = THIS_MODULE, - DRM_VRAM_MM_FILE_OPERATIONS -}; +DEFINE_DRM_GEM_FOPS(vbox_fops); static struct drm_driver driver = { .driver_features = diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.h b/drivers/gpu/drm/vboxvideo/vbox_drv.h index e8cb9efc6088..87421903816c 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.h +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.h @@ -16,12 +16,9 @@ #include <linux/string.h> #include <drm/drm_encoder.h> -#include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> #include <drm/drm_gem_vram_helper.h> -#include <drm/drm_vram_mm_helper.h> - #include "vboxvideo_guest.h" #include "vboxvideo_vbe.h" #include "hgsmi_ch_setup.h" @@ -48,16 +45,9 @@ sizeof(struct hgsmi_host_flags)) #define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE -struct vbox_framebuffer { - struct drm_framebuffer base; - struct drm_gem_object *obj; -}; - struct vbox_private { /* Must be first; or we must define our own release callback */ struct drm_device ddev; - struct drm_fb_helper fb_helper; - struct vbox_framebuffer afb; u8 __iomem *guest_heap; u8 __iomem *vbva_buffers; @@ -137,7 +127,6 @@ struct vbox_encoder { #define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base) #define to_vbox_connector(x) container_of(x, struct vbox_connector, base) #define 
to_vbox_encoder(x) container_of(x, struct vbox_encoder, base) -#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base) bool vbox_check_supported(u16 id); int vbox_hw_init(struct vbox_private *vbox); @@ -148,25 +137,9 @@ void vbox_mode_fini(struct vbox_private *vbox); void vbox_report_caps(struct vbox_private *vbox); -void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb, - struct drm_clip_rect *rects, - unsigned int num_rects); - -int vbox_framebuffer_init(struct vbox_private *vbox, - struct vbox_framebuffer *vbox_fb, - const struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_gem_object *obj); - -int vboxfb_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes); -void vbox_fbdev_fini(struct vbox_private *vbox); - int vbox_mm_init(struct vbox_private *vbox); void vbox_mm_fini(struct vbox_private *vbox); -int vbox_gem_create(struct vbox_private *vbox, - u32 size, bool iskernel, struct drm_gem_object **obj); - /* vbox_irq.c */ int vbox_irq_init(struct vbox_private *vbox); void vbox_irq_fini(struct vbox_private *vbox); diff --git a/drivers/gpu/drm/vboxvideo/vbox_fb.c b/drivers/gpu/drm/vboxvideo/vbox_fb.c deleted file mode 100644 index 8f74bcffc034..000000000000 --- a/drivers/gpu/drm/vboxvideo/vbox_fb.c +++ /dev/null @@ -1,149 +0,0 @@ -// SPDX-License-Identifier: MIT -/* - * Copyright (C) 2013-2017 Oracle Corporation - * This file is based on ast_fb.c - * Copyright 2012 Red Hat Inc. - * Authors: Dave Airlie <airlied@redhat.com> - * Michael Thayer <michael.thayer@oracle.com, - */ -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/fb.h> -#include <linux/init.h> -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/module.h> -#include <linux/pci.h> -#include <linux/string.h> -#include <linux/sysrq.h> -#include <linux/tty.h> - -#include <drm/drm_crtc.h> -#include <drm/drm_crtc_helper.h> -#include <drm/drm_fb_helper.h> -#include <drm/drm_fourcc.h> - -#include "vbox_drv.h" -#include "vboxvideo.h" - -#ifdef CONFIG_DRM_KMS_FB_HELPER -static struct fb_deferred_io vbox_defio = { - .delay = HZ / 30, - .deferred_io = drm_fb_helper_deferred_io, -}; -#endif - -static struct fb_ops vboxfb_ops = { - .owner = THIS_MODULE, - DRM_FB_HELPER_DEFAULT_OPS, - .fb_fillrect = drm_fb_helper_sys_fillrect, - .fb_copyarea = drm_fb_helper_sys_copyarea, - .fb_imageblit = drm_fb_helper_sys_imageblit, -}; - -int vboxfb_create(struct drm_fb_helper *helper, - struct drm_fb_helper_surface_size *sizes) -{ - struct vbox_private *vbox = - container_of(helper, struct vbox_private, fb_helper); - struct pci_dev *pdev = vbox->ddev.pdev; - struct drm_mode_fb_cmd2 mode_cmd; - struct drm_framebuffer *fb; - struct fb_info *info; - struct drm_gem_object *gobj; - struct drm_gem_vram_object *gbo; - int size, ret; - s64 gpu_addr; - u32 pitch; - - mode_cmd.width = sizes->surface_width; - mode_cmd.height = sizes->surface_height; - pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8); - mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, - sizes->surface_depth); - mode_cmd.pitches[0] = pitch; - - size = pitch * mode_cmd.height; - - ret = vbox_gem_create(vbox, size, true, &gobj); - if (ret) { - DRM_ERROR("failed to create fbcon backing object %d\n", ret); - return ret; - } - - ret = vbox_framebuffer_init(vbox, &vbox->afb, &mode_cmd, gobj); - if (ret) - return ret; - - gbo = drm_gem_vram_of_gem(gobj); - - ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); - if (ret) - return ret; - - info = drm_fb_helper_alloc_fbi(helper); - if 
(IS_ERR(info)) - return PTR_ERR(info); - - info->screen_size = size; - info->screen_base = (char __iomem *)drm_gem_vram_kmap(gbo, true, NULL); - if (IS_ERR(info->screen_base)) - return PTR_ERR(info->screen_base); - - fb = &vbox->afb.base; - helper->fb = fb; - - info->fbops = &vboxfb_ops; - - /* - * This seems to be done for safety checking that the framebuffer - * is not registered twice by different drivers. - */ - info->apertures->ranges[0].base = pci_resource_start(pdev, 0); - info->apertures->ranges[0].size = pci_resource_len(pdev, 0); - - drm_fb_helper_fill_info(info, helper, sizes); - - gpu_addr = drm_gem_vram_offset(gbo); - if (gpu_addr < 0) - return (int)gpu_addr; - info->fix.smem_start = info->apertures->ranges[0].base + gpu_addr; - info->fix.smem_len = vbox->available_vram_size - gpu_addr; - -#ifdef CONFIG_DRM_KMS_FB_HELPER - info->fbdefio = &vbox_defio; - fb_deferred_io_init(info); -#endif - - info->pixmap.flags = FB_PIXMAP_SYSTEM; - - DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height); - - return 0; -} - -void vbox_fbdev_fini(struct vbox_private *vbox) -{ - struct vbox_framebuffer *afb = &vbox->afb; - -#ifdef CONFIG_DRM_KMS_FB_HELPER - if (vbox->fb_helper.fbdev && vbox->fb_helper.fbdev->fbdefio) - fb_deferred_io_cleanup(vbox->fb_helper.fbdev); -#endif - - drm_fb_helper_unregister_fbi(&vbox->fb_helper); - - if (afb->obj) { - struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(afb->obj); - - drm_gem_vram_kunmap(gbo); - drm_gem_vram_unpin(gbo); - - drm_gem_object_put_unlocked(afb->obj); - afb->obj = NULL; - } - drm_fb_helper_fini(&vbox->fb_helper); - - drm_framebuffer_unregister_private(&afb->base); - drm_framebuffer_cleanup(&afb->base); -} diff --git a/drivers/gpu/drm/vboxvideo/vbox_main.c b/drivers/gpu/drm/vboxvideo/vbox_main.c index 02fa8277ff1e..9dcab115a261 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_main.c +++ b/drivers/gpu/drm/vboxvideo/vbox_main.c @@ -11,22 +11,12 @@ #include <linux/vbox_err.h> #include <drm/drm_fb_helper.h> #include <drm/drm_crtc_helper.h> +#include <drm/drm_damage_helper.h> #include "vbox_drv.h" #include "vboxvideo_guest.h" #include "vboxvideo_vbe.h" -static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb) -{ - struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb); - - if (vbox_fb->obj) - drm_gem_object_put_unlocked(vbox_fb->obj); - - drm_framebuffer_cleanup(fb); - kfree(fb); -} - void vbox_report_caps(struct vbox_private *vbox) { u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION | @@ -38,87 +28,6 @@ void vbox_report_caps(struct vbox_private *vbox) hgsmi_send_caps_info(vbox->guest_pool, caps); } -/* Send information about dirty rectangles to VBVA. 
*/ -void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb, - struct drm_clip_rect *rects, - unsigned int num_rects) -{ - struct vbox_private *vbox = fb->dev->dev_private; - struct drm_display_mode *mode; - struct drm_crtc *crtc; - int crtc_x, crtc_y; - unsigned int i; - - mutex_lock(&vbox->hw_mutex); - list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) { - if (crtc->primary->state->fb != fb) - continue; - - mode = &crtc->state->mode; - crtc_x = crtc->primary->state->src_x >> 16; - crtc_y = crtc->primary->state->src_y >> 16; - - for (i = 0; i < num_rects; ++i) { - struct vbva_cmd_hdr cmd_hdr; - unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id; - - if (rects[i].x1 > crtc_x + mode->hdisplay || - rects[i].y1 > crtc_y + mode->vdisplay || - rects[i].x2 < crtc_x || - rects[i].y2 < crtc_y) - continue; - - cmd_hdr.x = (s16)rects[i].x1; - cmd_hdr.y = (s16)rects[i].y1; - cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1; - cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1; - - if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id], - vbox->guest_pool)) - continue; - - vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool, - &cmd_hdr, sizeof(cmd_hdr)); - vbva_buffer_end_update(&vbox->vbva_info[crtc_id]); - } - } - mutex_unlock(&vbox->hw_mutex); -} - -static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb, - struct drm_file *file_priv, - unsigned int flags, unsigned int color, - struct drm_clip_rect *rects, - unsigned int num_rects) -{ - vbox_framebuffer_dirty_rectangles(fb, rects, num_rects); - - return 0; -} - -static const struct drm_framebuffer_funcs vbox_fb_funcs = { - .destroy = vbox_user_framebuffer_destroy, - .dirty = vbox_user_framebuffer_dirty, -}; - -int vbox_framebuffer_init(struct vbox_private *vbox, - struct vbox_framebuffer *vbox_fb, - const struct drm_mode_fb_cmd2 *mode_cmd, - struct drm_gem_object *obj) -{ - int ret; - - drm_helper_mode_fill_fb_struct(&vbox->ddev, &vbox_fb->base, mode_cmd); - vbox_fb->obj = obj; - ret = drm_framebuffer_init(&vbox->ddev, &vbox_fb->base, &vbox_fb_funcs); - if (ret) { - DRM_ERROR("framebuffer init failed %d\n", ret); - return ret; - } - - return 0; -} - static int vbox_accel_init(struct vbox_private *vbox) { struct vbva_buffer *vbva; @@ -270,29 +179,3 @@ void vbox_hw_fini(struct vbox_private *vbox) gen_pool_destroy(vbox->guest_pool); pci_iounmap(vbox->ddev.pdev, vbox->guest_heap); } - -int vbox_gem_create(struct vbox_private *vbox, - u32 size, bool iskernel, struct drm_gem_object **obj) -{ - struct drm_gem_vram_object *gbo; - int ret; - - *obj = NULL; - - size = roundup(size, PAGE_SIZE); - if (size == 0) - return -EINVAL; - - gbo = drm_gem_vram_create(&vbox->ddev, &vbox->ddev.vram_mm->bdev, - size, 0, false); - if (IS_ERR(gbo)) { - ret = PTR_ERR(gbo); - if (ret != -ERESTARTSYS) - DRM_ERROR("failed to allocate GEM object\n"); - return ret; - } - - *obj = &gbo->bo.base; - - return 0; -} diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c index e1e48ba919eb..19612132c8a3 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_mode.c +++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c @@ -13,7 +13,9 @@ #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_fb_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_plane_helper.h> #include <drm/drm_probe_helper.h> #include <drm/drm_vblank.h> @@ -133,7 +135,7 @@ static bool vbox_set_up_input_mapping(struct vbox_private *vbox) if (!fb1) { fb1 = fb; - if (to_vbox_framebuffer(fb1) == &vbox->afb) + 
if (fb1 == vbox->ddev.fb_helper->fb) break; } else if (fb != fb1) { single_framebuffer = false; @@ -172,8 +174,7 @@ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc, struct drm_framebuffer *fb, int x, int y) { - struct drm_gem_vram_object *gbo = - drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj); + struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]); struct vbox_private *vbox = crtc->dev->dev_private; struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state); @@ -283,10 +284,43 @@ static void vbox_primary_atomic_update(struct drm_plane *plane, { struct drm_crtc *crtc = plane->state->crtc; struct drm_framebuffer *fb = plane->state->fb; + struct vbox_private *vbox = fb->dev->dev_private; + struct drm_mode_rect *clips; + uint32_t num_clips, i; vbox_crtc_set_base_and_mode(crtc, fb, plane->state->src_x >> 16, plane->state->src_y >> 16); + + /* Send information about dirty rectangles to VBVA. */ + + clips = drm_plane_get_damage_clips(plane->state); + num_clips = drm_plane_get_damage_clips_count(plane->state); + + if (!num_clips) + return; + + mutex_lock(&vbox->hw_mutex); + + for (i = 0; i < num_clips; ++i, ++clips) { + struct vbva_cmd_hdr cmd_hdr; + unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id; + + cmd_hdr.x = (s16)clips->x1; + cmd_hdr.y = (s16)clips->y1; + cmd_hdr.w = (u16)clips->x2 - clips->x1; + cmd_hdr.h = (u16)clips->y2 - clips->y1; + + if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id], + vbox->guest_pool)) + continue; + + vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool, + &cmd_hdr, sizeof(cmd_hdr)); + vbva_buffer_end_update(&vbox->vbva_info[crtc_id]); + } + + mutex_unlock(&vbox->hw_mutex); } static void vbox_primary_atomic_disable(struct drm_plane *plane, @@ -300,35 +334,6 @@ static void vbox_primary_atomic_disable(struct drm_plane *plane, old_state->src_y >> 16); } -static int vbox_primary_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) -{ - struct drm_gem_vram_object *gbo; - int ret; - - if (!new_state->fb) - return 0; - - gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj); - ret = drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_VRAM); - if (ret) - DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret); - - return ret; -} - -static void vbox_primary_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct drm_gem_vram_object *gbo; - - if (!old_state->fb) - return; - - gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(old_state->fb)->obj); - drm_gem_vram_unpin(gbo); -} - static int vbox_cursor_atomic_check(struct drm_plane *plane, struct drm_plane_state *new_state) { @@ -386,8 +391,7 @@ static void vbox_cursor_atomic_update(struct drm_plane *plane, container_of(plane->dev, struct vbox_private, ddev); struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc); struct drm_framebuffer *fb = plane->state->fb; - struct drm_gem_vram_object *gbo = - drm_gem_vram_of_gem(to_vbox_framebuffer(fb)->obj); + struct drm_gem_vram_object *gbo = drm_gem_vram_of_gem(fb->obj[0]); u32 width = plane->state->crtc_w; u32 height = plane->state->crtc_h; size_t data_size, mask_size; @@ -459,30 +463,6 @@ static void vbox_cursor_atomic_disable(struct drm_plane *plane, mutex_unlock(&vbox->hw_mutex); } -static int vbox_cursor_prepare_fb(struct drm_plane *plane, - struct drm_plane_state *new_state) -{ - struct drm_gem_vram_object *gbo; - - if (!new_state->fb) - return 0; - - gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(new_state->fb)->obj); - 
return drm_gem_vram_pin(gbo, DRM_GEM_VRAM_PL_FLAG_SYSTEM); -} - -static void vbox_cursor_cleanup_fb(struct drm_plane *plane, - struct drm_plane_state *old_state) -{ - struct drm_gem_vram_object *gbo; - - if (!plane->state->fb) - return; - - gbo = drm_gem_vram_of_gem(to_vbox_framebuffer(plane->state->fb)->obj); - drm_gem_vram_unpin(gbo); -} - static const u32 vbox_cursor_plane_formats[] = { DRM_FORMAT_ARGB8888, }; @@ -491,8 +471,8 @@ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = { .atomic_check = vbox_cursor_atomic_check, .atomic_update = vbox_cursor_atomic_update, .atomic_disable = vbox_cursor_atomic_disable, - .prepare_fb = vbox_cursor_prepare_fb, - .cleanup_fb = vbox_cursor_cleanup_fb, + .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, + .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb, }; static const struct drm_plane_funcs vbox_cursor_plane_funcs = { @@ -513,8 +493,8 @@ static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = { .atomic_check = vbox_primary_atomic_check, .atomic_update = vbox_primary_atomic_update, .atomic_disable = vbox_primary_atomic_disable, - .prepare_fb = vbox_primary_prepare_fb, - .cleanup_fb = vbox_primary_cleanup_fb, + .prepare_fb = drm_gem_vram_plane_helper_prepare_fb, + .cleanup_fb = drm_gem_vram_plane_helper_cleanup_fb, }; static const struct drm_plane_funcs vbox_primary_plane_funcs = { @@ -856,40 +836,8 @@ static int vbox_connector_init(struct drm_device *dev, return 0; } -static struct drm_framebuffer *vbox_user_framebuffer_create( - struct drm_device *dev, - struct drm_file *filp, - const struct drm_mode_fb_cmd2 *mode_cmd) -{ - struct vbox_private *vbox = - container_of(dev, struct vbox_private, ddev); - struct drm_gem_object *obj; - struct vbox_framebuffer *vbox_fb; - int ret = -ENOMEM; - - obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]); - if (!obj) - return ERR_PTR(-ENOENT); - - vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL); - if (!vbox_fb) - goto err_unref_obj; - - ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj); - if (ret) - goto err_free_vbox_fb; - - return &vbox_fb->base; - -err_free_vbox_fb: - kfree(vbox_fb); -err_unref_obj: - drm_gem_object_put_unlocked(obj); - return ERR_PTR(ret); -} - static const struct drm_mode_config_funcs vbox_mode_funcs = { - .fb_create = vbox_user_framebuffer_create, + .fb_create = drm_gem_fb_create_with_dirty, .atomic_check = drm_atomic_helper_check, .atomic_commit = drm_atomic_helper_commit, }; diff --git a/drivers/gpu/drm/vboxvideo/vbox_ttm.c b/drivers/gpu/drm/vboxvideo/vbox_ttm.c index b82595a9ed0f..976423d0c3cc 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_ttm.c +++ b/drivers/gpu/drm/vboxvideo/vbox_ttm.c @@ -17,8 +17,7 @@ int vbox_mm_init(struct vbox_private *vbox) struct drm_device *dev = &vbox->ddev; vmm = drm_vram_helper_alloc_mm(dev, pci_resource_start(dev->pdev, 0), - vbox->available_vram_size, - &drm_gem_vram_mm_funcs); + vbox->available_vram_size); if (IS_ERR(vmm)) { ret = PTR_ERR(vmm); DRM_ERROR("Error initializing VRAM MM; %d\n", ret); diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index f1f0a7c87771..b00e20f5ce05 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -994,7 +994,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc, struct vc4_dev *vc4 = to_vc4_dev(crtc->dev); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(state); - if (vc4_state->mm.allocated) { + if (drm_mm_node_allocated(&vc4_state->mm)) { unsigned long flags; spin_lock_irqsave(&vc4->hvs->mm_lock, flags); 
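The vc4 hunks above and below replace open-coded reads of the private drm_mm_node.allocated flag with the drm_mm_node_allocated() helper before removing a node. A minimal, self-contained sketch of that pattern follows; the function and lock names are made up for illustration and are not taken from vc4 or from this series.

#include <linux/spinlock.h>
#include <drm/drm_mm.h>

/* Illustrative only: release a range-allocator node iff it was ever inserted. */
static void example_release_node(struct drm_mm_node *node, spinlock_t *lock)
{
	unsigned long flags;

	if (!drm_mm_node_allocated(node))
		return;			/* never inserted, nothing to remove */

	spin_lock_irqsave(lock, flags);
	drm_mm_remove_node(node);	/* return the range to the allocator */
	spin_unlock_irqrestore(lock, flags);
}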
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c index 8a27a6acee61..c586325de2a5 100644 --- a/drivers/gpu/drm/vc4/vc4_dpi.c +++ b/drivers/gpu/drm/vc4/vc4_dpi.c @@ -249,7 +249,8 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi) } if (panel) - bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_DPI); + bridge = drm_panel_bridge_add_typed(panel, + DRM_MODE_CONNECTOR_DPI); return drm_bridge_attach(dpi->encoder, bridge, NULL); } diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c index c78fa8144776..c9ba83ed49b9 100644 --- a/drivers/gpu/drm/vc4/vc4_dsi.c +++ b/drivers/gpu/drm/vc4/vc4_dsi.c @@ -31,6 +31,7 @@ #include <linux/pm_runtime.h> #include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> #include <drm/drm_edid.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_of.h> @@ -1575,8 +1576,8 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data) } if (panel) { - dsi->bridge = devm_drm_panel_bridge_add(dev, panel, - DRM_MODE_CONNECTOR_DSI); + dsi->bridge = devm_drm_panel_bridge_add_typed(dev, panel, + DRM_MODE_CONNECTOR_DSI); if (IS_ERR(dsi->bridge)) return PTR_ERR(dsi->bridge); } diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index ee7d4e7b0ee3..1c62c6c9244b 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -398,10 +398,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder) HDMI_QUANTIZATION_RANGE_LIMITED : HDMI_QUANTIZATION_RANGE_FULL); - frame.avi.right_bar = cstate->tv.margins.right; - frame.avi.left_bar = cstate->tv.margins.left; - frame.avi.top_bar = cstate->tv.margins.top; - frame.avi.bottom_bar = cstate->tv.margins.bottom; + drm_hdmi_avi_infoframe_bars(&frame.avi, cstate); vc4_hdmi_write_infoframe(encoder, &frame); } @@ -1285,6 +1282,9 @@ static const struct cec_adap_ops vc4_hdmi_cec_adap_ops = { static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) { +#ifdef CONFIG_DRM_VC4_HDMI_CEC + struct cec_connector_info conn_info; +#endif struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = dev_get_drvdata(master); struct vc4_dev *vc4 = drm->dev_private; @@ -1403,13 +1403,15 @@ static int vc4_hdmi_bind(struct device *dev, struct device *master, void *data) #ifdef CONFIG_DRM_VC4_HDMI_CEC hdmi->cec_adap = cec_allocate_adapter(&vc4_hdmi_cec_adap_ops, vc4, "vc4", - CEC_CAP_TRANSMIT | - CEC_CAP_LOG_ADDRS | - CEC_CAP_PASSTHROUGH | - CEC_CAP_RC, 1); + CEC_CAP_DEFAULTS | + CEC_CAP_CONNECTOR_INFO, 1); ret = PTR_ERR_OR_ZERO(hdmi->cec_adap); if (ret < 0) goto err_destroy_conn; + + cec_fill_conn_info_from_drm(&conn_info, hdmi->connector); + cec_s_conn_info(hdmi->cec_adap, &conn_info); + HDMI_WRITE(VC4_HDMI_CPU_MASK_SET, 0xffffffff); value = HDMI_READ(VC4_HDMI_CEC_CNTRL_1); value &= ~VC4_HDMI_CEC_DIV_CLK_CNT_MASK; diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 9936b15d0bf1..5a43659da319 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -315,7 +315,7 @@ static void vc4_hvs_unbind(struct device *dev, struct device *master, struct drm_device *drm = dev_get_drvdata(master); struct vc4_dev *vc4 = drm->dev_private; - if (vc4->hvs->mitchell_netravali_filter.allocated) + if (drm_mm_node_allocated(&vc4->hvs->mitchell_netravali_filter)) drm_mm_remove_node(&vc4->hvs->mitchell_netravali_filter); drm_mm_takedown(&vc4->hvs->dlist_mm); diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 
5e5f90810aca..4934127f0d76 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -178,7 +178,7 @@ static void vc4_plane_destroy_state(struct drm_plane *plane, struct vc4_dev *vc4 = to_vc4_dev(plane->dev); struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); - if (vc4_state->lbm.allocated) { + if (drm_mm_node_allocated(&vc4_state->lbm)) { unsigned long irqflags; spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags); @@ -557,7 +557,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) /* Allocate the LBM memory that the HVS will use for temporary * storage due to our scaling/format conversion. */ - if (!vc4_state->lbm.allocated) { + if (!drm_mm_node_allocated(&vc4_state->lbm)) { int ret; spin_lock_irqsave(&vc4->hvs->mm_lock, irqflags); diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig index ba36e933bb49..eff3047052d4 100644 --- a/drivers/gpu/drm/virtio/Kconfig +++ b/drivers/gpu/drm/virtio/Kconfig @@ -3,7 +3,7 @@ config DRM_VIRTIO_GPU tristate "Virtio GPU driver" depends on DRM && VIRTIO && MMU select DRM_KMS_HELPER - select DRM_TTM + select DRM_GEM_SHMEM_HELPER help This is the virtual GPU driver for virtio. It can be used with QEMU based VMMs (like KVM or Xen). diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile index 458e606a936f..92aa2b3d349d 100644 --- a/drivers/gpu/drm/virtio/Makefile +++ b/drivers/gpu/drm/virtio/Makefile @@ -4,7 +4,7 @@ # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o \ - virtgpu_display.o virtgpu_vq.o virtgpu_ttm.o \ + virtgpu_display.o virtgpu_vq.o \ virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \ virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c index 0fc32fa0b3c0..8dee698c90ff 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.c +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -56,7 +56,6 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd dev->pdev = pdev; if (vga) drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, - 0, "virtiodrmfb"); /* @@ -185,17 +184,7 @@ MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>"); MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); MODULE_AUTHOR("Alon Levy"); -static const struct file_operations virtio_gpu_driver_fops = { - .owner = THIS_MODULE, - .open = drm_open, - .mmap = virtio_gpu_mmap, - .poll = drm_poll, - .read = drm_read, - .unlocked_ioctl = drm_ioctl, - .release = drm_release, - .compat_ioctl = drm_compat_ioctl, - .llseek = noop_llseek, -}; +DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops); static struct drm_driver driver = { .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC, @@ -210,15 +199,10 @@ static struct drm_driver driver = { #endif .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table, + .gem_prime_mmap = drm_gem_prime_mmap, .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, - .gem_prime_vmap = virtgpu_gem_prime_vmap, - .gem_prime_vunmap = virtgpu_gem_prime_vunmap, - .gem_prime_mmap = virtgpu_gem_prime_mmap, - .gem_free_object_unlocked = virtio_gpu_gem_free_object, - .gem_open_object = virtio_gpu_gem_object_open, - .gem_close_object = virtio_gpu_gem_object_close, + .gem_create_object = virtio_gpu_create_object, .fops = &virtio_gpu_driver_fops, .ioctls = 
virtio_gpu_ioctls, diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h index e28829661724..0b56ba005e25 100644 --- a/drivers/gpu/drm/virtio/virtgpu_drv.h +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -35,12 +35,9 @@ #include <drm/drm_encoder.h> #include <drm/drm_fb_helper.h> #include <drm/drm_gem.h> +#include <drm/drm_gem_shmem_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_probe_helper.h> -#include <drm/ttm/ttm_bo_api.h> -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_placement.h> #define DRIVER_NAME "virtio_gpu" #define DRIVER_DESC "virtio GPU" @@ -68,21 +65,23 @@ struct virtio_gpu_object_params { }; struct virtio_gpu_object { - struct drm_gem_object gem_base; + struct drm_gem_shmem_object base; uint32_t hw_res_handle; struct sg_table *pages; uint32_t mapped; - void *vmap; bool dumb; - struct ttm_place placement_code; - struct ttm_placement placement; - struct ttm_buffer_object tbo; - struct ttm_bo_kmap_obj kmap; bool created; }; #define gem_to_virtio_gpu_obj(gobj) \ - container_of((gobj), struct virtio_gpu_object, gem_base) + container_of((gobj), struct virtio_gpu_object, base.base) + +struct virtio_gpu_object_array { + struct ww_acquire_ctx ticket; + struct list_head next; + u32 nents, total; + struct drm_gem_object *objs[]; +}; struct virtio_gpu_vbuffer; struct virtio_gpu_device; @@ -115,9 +114,9 @@ struct virtio_gpu_vbuffer { char *resp_buf; int resp_size; - virtio_gpu_resp_cb resp_cb; + struct virtio_gpu_object_array *objs; struct list_head list; }; @@ -147,10 +146,6 @@ struct virtio_gpu_framebuffer { #define to_virtio_gpu_framebuffer(x) \ container_of(x, struct virtio_gpu_framebuffer, base) -struct virtio_gpu_mman { - struct ttm_bo_device bdev; -}; - struct virtio_gpu_queue { struct virtqueue *vq; spinlock_t qlock; @@ -179,8 +174,6 @@ struct virtio_gpu_device { struct virtio_device *vdev; - struct virtio_gpu_mman mman; - struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS]; uint32_t num_scanouts; @@ -205,6 +198,10 @@ struct virtio_gpu_device { struct work_struct config_changed_work; + struct work_struct obj_free_work; + spinlock_t obj_free_lock; + struct list_head obj_free_list; + struct virtio_gpu_drv_capset *capsets; uint32_t num_capsets; struct list_head cap_cache; @@ -217,9 +214,6 @@ struct virtio_gpu_fpriv { /* virtio_ioctl.c */ #define DRM_VIRTIO_NUM_IOCTLS 10 extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; -int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket, - struct list_head *head); -void virtio_gpu_unref_list(struct list_head *head); /* virtio_kms.c */ int virtio_gpu_init(struct drm_device *dev); @@ -240,10 +234,6 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj, struct drm_file *file); void virtio_gpu_gem_object_close(struct drm_gem_object *obj, struct drm_file *file); -struct virtio_gpu_object* -virtio_gpu_alloc_object(struct drm_device *dev, - struct virtio_gpu_object_params *params, - struct virtio_gpu_fence *fence); int virtio_gpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); @@ -251,20 +241,35 @@ int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv, struct drm_device *dev, uint32_t handle, uint64_t *offset_p); +struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents); +struct virtio_gpu_object_array* +virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents); +void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs, + 
struct drm_gem_object *obj); +int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs, + struct dma_fence *fence); +void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs); +void virtio_gpu_array_put_free_work(struct work_struct *work); + /* virtio vg */ int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev); void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev); void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo, struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, uint32_t resource_id); void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *bo, uint64_t offset, - __le32 width, __le32 height, - __le32 x, __le32 y, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, uint32_t resource_id, @@ -295,28 +300,32 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev, uint32_t id); void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev, uint32_t ctx_id, - uint32_t resource_id); + struct virtio_gpu_object_array *objs); void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev, uint32_t ctx_id, - uint32_t resource_id); + struct virtio_gpu_object_array *objs); void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, void *data, uint32_t data_size, - uint32_t ctx_id, struct virtio_gpu_fence *fence); + uint32_t ctx_id, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, - uint32_t resource_id, uint32_t ctx_id, + uint32_t ctx_id, uint64_t offset, uint32_t level, struct virtio_gpu_box *box, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *bo, uint32_t ctx_id, uint64_t offset, uint32_t level, struct virtio_gpu_box *box, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo, struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence); void virtio_gpu_ctrl_ack(struct virtqueue *vq); void virtio_gpu_cursor_ack(struct virtqueue *vq); @@ -339,11 +348,6 @@ struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, enum drm_plane_type type, int index); -/* virtio_gpu_ttm.c */ -int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev); -void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev); -int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma); - /* virtio_gpu_fence.c */ bool virtio_fence_signaled(struct dma_fence *f); struct virtio_gpu_fence *virtio_gpu_fence_alloc( @@ -355,70 +359,21 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev, u64 last_seq); /* virtio_gpu_object */ +struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, + size_t size); 
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, struct virtio_gpu_object_params *params, struct virtio_gpu_object **bo_ptr, struct virtio_gpu_fence *fence); -void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo); -int virtio_gpu_object_kmap(struct virtio_gpu_object *bo); -int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev, - struct virtio_gpu_object *bo); -void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo); -int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait); /* virtgpu_prime.c */ -struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *virtgpu_gem_prime_import_sg_table( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sgt); -void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj); -void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); -int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma); - -static inline struct virtio_gpu_object* -virtio_gpu_object_ref(struct virtio_gpu_object *bo) -{ - ttm_bo_get(&bo->tbo); - return bo; -} - -static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo) -{ - struct ttm_buffer_object *tbo; - - if ((*bo) == NULL) - return; - tbo = &((*bo)->tbo); - ttm_bo_put(tbo); - *bo = NULL; -} static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo) { - return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); -} - -static inline int virtio_gpu_object_reserve(struct virtio_gpu_object *bo, - bool no_wait) -{ - int r; - - r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); - if (unlikely(r != 0)) { - if (r != -ERESTARTSYS) { - struct virtio_gpu_device *qdev = - bo->gem_base.dev->dev_private; - dev_err(qdev->dev, "%p reserve failed\n", bo); - } - return r; - } - return 0; -} - -static inline void virtio_gpu_object_unreserve(struct virtio_gpu_object *bo) -{ - ttm_bo_unreserve(&bo->tbo); + return drm_vma_node_offset_addr(&bo->base.base.vma_node); } /* virgl debufs */ diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c index a0514f5bd006..a4b9881ca1d3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_fence.c +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -41,6 +41,10 @@ bool virtio_fence_signaled(struct dma_fence *f) { struct virtio_gpu_fence *fence = to_virtio_fence(f); + if (WARN_ON_ONCE(fence->f.seqno == 0)) + /* leaked fence outside driver before completing + * initialization with virtio_gpu_fence_emit */ + return false; if (atomic64_read(&fence->drv->last_seq) >= fence->f.seqno) return true; return false; diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c index 292566146814..4c1f579edfb3 100644 --- a/drivers/gpu/drm/virtio/virtgpu_gem.c +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -28,54 +28,31 @@ #include "virtgpu_drv.h" -void virtio_gpu_gem_free_object(struct drm_gem_object *gem_obj) -{ - struct virtio_gpu_object *obj = gem_to_virtio_gpu_obj(gem_obj); - - if (obj) - virtio_gpu_object_unref(&obj); -} - -struct virtio_gpu_object* -virtio_gpu_alloc_object(struct drm_device *dev, - struct virtio_gpu_object_params *params, - struct virtio_gpu_fence *fence) -{ - struct virtio_gpu_device *vgdev = dev->dev_private; - struct virtio_gpu_object *obj; - int ret; - - ret = virtio_gpu_object_create(vgdev, params, &obj, fence); - if (ret) - return ERR_PTR(ret); - - return obj; -} - int virtio_gpu_gem_create(struct drm_file *file, struct drm_device *dev, struct virtio_gpu_object_params 
*params, struct drm_gem_object **obj_p, uint32_t *handle_p) { + struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_object *obj; int ret; u32 handle; - obj = virtio_gpu_alloc_object(dev, params, NULL); - if (IS_ERR(obj)) - return PTR_ERR(obj); + ret = virtio_gpu_object_create(vgdev, params, &obj, NULL); + if (ret < 0) + return ret; - ret = drm_gem_handle_create(file, &obj->gem_base, &handle); + ret = drm_gem_handle_create(file, &obj->base.base, &handle); if (ret) { - drm_gem_object_release(&obj->gem_base); + drm_gem_object_release(&obj->base.base); return ret; } - *obj_p = &obj->gem_base; + *obj_p = &obj->base.base; /* drop reference from allocate - handle holds it now */ - drm_gem_object_put_unlocked(&obj->gem_base); + drm_gem_object_put_unlocked(&obj->base.base); *handle_p = handle; return 0; @@ -136,19 +113,18 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj, { struct virtio_gpu_device *vgdev = obj->dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; - struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj); - int r; + struct virtio_gpu_object_array *objs; if (!vgdev->has_virgl_3d) return 0; - r = virtio_gpu_object_reserve(qobj, false); - if (r) - return r; + objs = virtio_gpu_array_alloc(1); + if (!objs) + return -ENOMEM; + virtio_gpu_array_add_obj(objs, obj); virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, - qobj->hw_res_handle); - virtio_gpu_object_unreserve(qobj); + objs); return 0; } @@ -157,17 +133,136 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj, { struct virtio_gpu_device *vgdev = obj->dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; - struct virtio_gpu_object *qobj = gem_to_virtio_gpu_obj(obj); - int r; + struct virtio_gpu_object_array *objs; if (!vgdev->has_virgl_3d) return; - r = virtio_gpu_object_reserve(qobj, false); - if (r) + objs = virtio_gpu_array_alloc(1); + if (!objs) return; + virtio_gpu_array_add_obj(objs, obj); virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id, - qobj->hw_res_handle); - virtio_gpu_object_unreserve(qobj); + objs); +} + +struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents) +{ + struct virtio_gpu_object_array *objs; + size_t size = sizeof(*objs) + sizeof(objs->objs[0]) * nents; + + objs = kmalloc(size, GFP_KERNEL); + if (!objs) + return NULL; + + objs->nents = 0; + objs->total = nents; + return objs; +} + +static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs) +{ + kfree(objs); +} + +struct virtio_gpu_object_array* +virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents) +{ + struct virtio_gpu_object_array *objs; + u32 i; + + objs = virtio_gpu_array_alloc(nents); + if (!objs) + return NULL; + + for (i = 0; i < nents; i++) { + objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]); + if (!objs->objs[i]) { + objs->nents = i; + virtio_gpu_array_put_free(objs); + return NULL; + } + } + objs->nents = i; + return objs; +} + +void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs, + struct drm_gem_object *obj) +{ + if (WARN_ON_ONCE(objs->nents == objs->total)) + return; + + drm_gem_object_get(obj); + objs->objs[objs->nents] = obj; + objs->nents++; +} + +int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs) +{ + int ret; + + if (objs->nents == 1) { + ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL); + } else { + ret = drm_gem_lock_reservations(objs->objs, objs->nents, + &objs->ticket); + } + return ret; +} + +void 
virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs) +{ + if (objs->nents == 1) { + dma_resv_unlock(objs->objs[0]->resv); + } else { + drm_gem_unlock_reservations(objs->objs, objs->nents, + &objs->ticket); + } +} + +void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs, + struct dma_fence *fence) +{ + int i; + + for (i = 0; i < objs->nents; i++) + dma_resv_add_excl_fence(objs->objs[i]->resv, fence); +} + +void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs) +{ + u32 i; + + for (i = 0; i < objs->nents; i++) + drm_gem_object_put_unlocked(objs->objs[i]); + virtio_gpu_array_free(objs); +} + +void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs) +{ + spin_lock(&vgdev->obj_free_lock); + list_add_tail(&objs->next, &vgdev->obj_free_list); + spin_unlock(&vgdev->obj_free_lock); + schedule_work(&vgdev->obj_free_work); +} + +void virtio_gpu_array_put_free_work(struct work_struct *work) +{ + struct virtio_gpu_device *vgdev = + container_of(work, struct virtio_gpu_device, obj_free_work); + struct virtio_gpu_object_array *objs; + + spin_lock(&vgdev->obj_free_lock); + while (!list_empty(&vgdev->obj_free_list)) { + objs = list_first_entry(&vgdev->obj_free_list, + struct virtio_gpu_object_array, next); + list_del(&objs->next); + spin_unlock(&vgdev->obj_free_lock); + virtio_gpu_array_put_free(objs); + spin_lock(&vgdev->obj_free_lock); + } + spin_unlock(&vgdev->obj_free_lock); } diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c index 0a88ef11b9d3..9af1ec62434f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -29,7 +29,6 @@ #include <linux/sync_file.h> #include <drm/drm_file.h> -#include <drm/ttm/ttm_execbuf_util.h> #include <drm/virtgpu_drm.h> #include "virtgpu_drv.h" @@ -56,45 +55,6 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data, &virtio_gpu_map->offset); } -int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket, - struct list_head *head) -{ - struct ttm_operation_ctx ctx = { false, false }; - struct ttm_validate_buffer *buf; - struct ttm_buffer_object *bo; - struct virtio_gpu_object *qobj; - int ret; - - ret = ttm_eu_reserve_buffers(ticket, head, true, NULL, true); - if (ret != 0) - return ret; - - list_for_each_entry(buf, head, head) { - bo = buf->bo; - qobj = container_of(bo, struct virtio_gpu_object, tbo); - ret = ttm_bo_validate(bo, &qobj->placement, &ctx); - if (ret) { - ttm_eu_backoff_reservation(ticket, head); - return ret; - } - } - return 0; -} - -void virtio_gpu_unref_list(struct list_head *head) -{ - struct ttm_validate_buffer *buf; - struct ttm_buffer_object *bo; - struct virtio_gpu_object *qobj; - - list_for_each_entry(buf, head, head) { - bo = buf->bo; - qobj = container_of(bo, struct virtio_gpu_object, tbo); - - drm_gem_object_put_unlocked(&qobj->gem_base); - } -} - /* * Usage of execbuffer: * Relocations need to take into account the full VIRTIO_GPUDrawable size. 
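The virtio_gpu_object_array helpers added above replace the old TTM validate-list handling in the ioctls that follow. A rough, hypothetical sketch of the lifecycle they imply is below; it assumes the driver's virtgpu_drv.h and a made-up caller name, and in the real ioctls the array is usually handed to a virtio_gpu_cmd_*() call, which takes ownership and releases it when the host completes the command.

#include "virtgpu_drv.h"	/* driver-internal header declaring the helpers */

/* Hypothetical helper, not part of the patch: fence a set of GEM handles. */
static int example_fence_handles(struct drm_file *file, u32 *handles,
				 u32 nents, struct dma_fence *fence)
{
	struct virtio_gpu_object_array *objs;
	int ret;

	objs = virtio_gpu_array_from_handles(file, handles, nents);
	if (!objs)
		return -ENOENT;		/* lookup failed, refs already dropped */

	ret = virtio_gpu_array_lock_resv(objs);	/* ww-lock every object's reservation */
	if (ret)
		goto out_put;

	virtio_gpu_array_add_fence(objs, fence);	/* attach exclusive fence while locked */
	virtio_gpu_array_unlock_resv(objs);
out_put:
	virtio_gpu_array_put_free(objs);	/* drop object refs and free the array */
	return ret;
}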
@@ -107,16 +67,11 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_virtgpu_execbuffer *exbuf = data; struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv; - struct drm_gem_object *gobj; struct virtio_gpu_fence *out_fence; - struct virtio_gpu_object *qobj; int ret; uint32_t *bo_handles = NULL; void __user *user_bo_handles = NULL; - struct list_head validate_list; - struct ttm_validate_buffer *buflist = NULL; - int i; - struct ww_acquire_ctx ticket; + struct virtio_gpu_object_array *buflist = NULL; struct sync_file *sync_file; int in_fence_fd = exbuf->fence_fd; int out_fence_fd = -1; @@ -157,15 +112,10 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, return out_fence_fd; } - INIT_LIST_HEAD(&validate_list); if (exbuf->num_bo_handles) { - bo_handles = kvmalloc_array(exbuf->num_bo_handles, - sizeof(uint32_t), GFP_KERNEL); - buflist = kvmalloc_array(exbuf->num_bo_handles, - sizeof(struct ttm_validate_buffer), - GFP_KERNEL | __GFP_ZERO); - if (!bo_handles || !buflist) { + sizeof(uint32_t), GFP_KERNEL); + if (!bo_handles) { ret = -ENOMEM; goto out_unused_fd; } @@ -177,27 +127,23 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, goto out_unused_fd; } - for (i = 0; i < exbuf->num_bo_handles; i++) { - gobj = drm_gem_object_lookup(drm_file, bo_handles[i]); - if (!gobj) { - ret = -ENOENT; - goto out_unused_fd; - } - - qobj = gem_to_virtio_gpu_obj(gobj); - buflist[i].bo = &qobj->tbo; - - list_add(&buflist[i].head, &validate_list); + buflist = virtio_gpu_array_from_handles(drm_file, bo_handles, + exbuf->num_bo_handles); + if (!buflist) { + ret = -ENOENT; + goto out_unused_fd; } kvfree(bo_handles); bo_handles = NULL; } - ret = virtio_gpu_object_list_validate(&ticket, &validate_list); - if (ret) - goto out_free; + if (buflist) { + ret = virtio_gpu_array_lock_resv(buflist); + if (ret) + goto out_unused_fd; + } - buf = memdup_user(u64_to_user_ptr(exbuf->command), exbuf->size); + buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size); if (IS_ERR(buf)) { ret = PTR_ERR(buf); goto out_unresv; @@ -222,24 +168,18 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, } virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, - vfpriv->ctx_id, out_fence); - - ttm_eu_fence_buffer_objects(&ticket, &validate_list, &out_fence->f); - - /* fence the command bo */ - virtio_gpu_unref_list(&validate_list); - kvfree(buflist); + vfpriv->ctx_id, buflist, out_fence); return 0; out_memdup: - kfree(buf); + kvfree(buf); out_unresv: - ttm_eu_backoff_reservation(&ticket, &validate_list); -out_free: - virtio_gpu_unref_list(&validate_list); + if (buflist) + virtio_gpu_array_unlock_resv(buflist); out_unused_fd: kvfree(bo_handles); - kvfree(buflist); + if (buflist) + virtio_gpu_array_put_free(buflist); if (out_fence_fd >= 0) put_unused_fd(out_fence_fd); @@ -316,11 +256,11 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, fence = virtio_gpu_fence_alloc(vgdev); if (!fence) return -ENOMEM; - qobj = virtio_gpu_alloc_object(dev, &params, fence); + ret = virtio_gpu_object_create(vgdev, &params, &qobj, fence); dma_fence_put(&fence->f); - if (IS_ERR(qobj)) - return PTR_ERR(qobj); - obj = &qobj->gem_base; + if (ret < 0) + return ret; + obj = &qobj->base.base; ret = drm_gem_handle_create(file_priv, obj, &handle); if (ret) { @@ -347,7 +287,7 @@ static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data, qobj = 
gem_to_virtio_gpu_obj(gobj); - ri->size = qobj->gem_base.size; + ri->size = qobj->base.base.size; ri->res_handle = qobj->hw_res_handle; drm_gem_object_put_unlocked(gobj); return 0; @@ -360,9 +300,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; struct drm_virtgpu_3d_transfer_from_host *args = data; - struct ttm_operation_ctx ctx = { true, false }; - struct drm_gem_object *gobj = NULL; - struct virtio_gpu_object *qobj = NULL; + struct virtio_gpu_object_array *objs; struct virtio_gpu_fence *fence; int ret; u32 offset = args->offset; @@ -371,39 +309,31 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, if (vgdev->has_virgl_3d == false) return -ENOSYS; - gobj = drm_gem_object_lookup(file, args->bo_handle); - if (gobj == NULL) + objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1); + if (objs == NULL) return -ENOENT; - qobj = gem_to_virtio_gpu_obj(gobj); - - ret = virtio_gpu_object_reserve(qobj, false); - if (ret) - goto out; - - ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); - if (unlikely(ret)) - goto out_unres; + ret = virtio_gpu_array_lock_resv(objs); + if (ret != 0) + goto err_put_free; convert_to_hw_box(&box, &args->box); fence = virtio_gpu_fence_alloc(vgdev); if (!fence) { ret = -ENOMEM; - goto out_unres; + goto err_unlock; } virtio_gpu_cmd_transfer_from_host_3d - (vgdev, qobj->hw_res_handle, - vfpriv->ctx_id, offset, args->level, - &box, fence); - dma_resv_add_excl_fence(qobj->tbo.base.resv, - &fence->f); - + (vgdev, vfpriv->ctx_id, offset, args->level, + &box, objs, fence); dma_fence_put(&fence->f); -out_unres: - virtio_gpu_object_unreserve(qobj); -out: - drm_gem_object_put_unlocked(gobj); + return 0; + +err_unlock: + virtio_gpu_array_unlock_resv(objs); +err_put_free: + virtio_gpu_array_put_free(objs); return ret; } @@ -413,75 +343,71 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, struct virtio_gpu_device *vgdev = dev->dev_private; struct virtio_gpu_fpriv *vfpriv = file->driver_priv; struct drm_virtgpu_3d_transfer_to_host *args = data; - struct ttm_operation_ctx ctx = { true, false }; - struct drm_gem_object *gobj = NULL; - struct virtio_gpu_object *qobj = NULL; + struct virtio_gpu_object_array *objs; struct virtio_gpu_fence *fence; struct virtio_gpu_box box; int ret; u32 offset = args->offset; - gobj = drm_gem_object_lookup(file, args->bo_handle); - if (gobj == NULL) + objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1); + if (objs == NULL) return -ENOENT; - qobj = gem_to_virtio_gpu_obj(gobj); - - ret = virtio_gpu_object_reserve(qobj, false); - if (ret) - goto out; - - ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); - if (unlikely(ret)) - goto out_unres; - convert_to_hw_box(&box, &args->box); if (!vgdev->has_virgl_3d) { virtio_gpu_cmd_transfer_to_host_2d - (vgdev, qobj, offset, - box.w, box.h, box.x, box.y, NULL); + (vgdev, offset, + box.w, box.h, box.x, box.y, + objs, NULL); } else { + ret = virtio_gpu_array_lock_resv(objs); + if (ret != 0) + goto err_put_free; + + ret = -ENOMEM; fence = virtio_gpu_fence_alloc(vgdev); - if (!fence) { - ret = -ENOMEM; - goto out_unres; - } + if (!fence) + goto err_unlock; + virtio_gpu_cmd_transfer_to_host_3d - (vgdev, qobj, + (vgdev, vfpriv ? 
vfpriv->ctx_id : 0, offset, - args->level, &box, fence); - dma_resv_add_excl_fence(qobj->tbo.base.resv, - &fence->f); + args->level, &box, objs, fence); dma_fence_put(&fence->f); } + return 0; -out_unres: - virtio_gpu_object_unreserve(qobj); -out: - drm_gem_object_put_unlocked(gobj); +err_unlock: + virtio_gpu_array_unlock_resv(objs); +err_put_free: + virtio_gpu_array_put_free(objs); return ret; } static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) + struct drm_file *file) { struct drm_virtgpu_3d_wait *args = data; - struct drm_gem_object *gobj = NULL; - struct virtio_gpu_object *qobj = NULL; + struct drm_gem_object *obj; + long timeout = 15 * HZ; int ret; - bool nowait = false; - gobj = drm_gem_object_lookup(file, args->handle); - if (gobj == NULL) + obj = drm_gem_object_lookup(file, args->handle); + if (obj == NULL) return -ENOENT; - qobj = gem_to_virtio_gpu_obj(gobj); - - if (args->flags & VIRTGPU_WAIT_NOWAIT) - nowait = true; - ret = virtio_gpu_object_wait(qobj, nowait); + if (args->flags & VIRTGPU_WAIT_NOWAIT) { + ret = dma_resv_test_signaled_rcu(obj->resv, true); + } else { + ret = dma_resv_wait_timeout_rcu(obj->resv, true, true, + timeout); + } + if (ret == 0) + ret = -EBUSY; + else if (ret > 0) + ret = 0; - drm_gem_object_put_unlocked(gobj); + drm_gem_object_put_unlocked(obj); return ret; } diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c index c190702fab72..2f5773e43557 100644 --- a/drivers/gpu/drm/virtio/virtgpu_kms.c +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -147,19 +147,23 @@ int virtio_gpu_init(struct drm_device *dev) INIT_WORK(&vgdev->config_changed_work, virtio_gpu_config_changed_work_func); + INIT_WORK(&vgdev->obj_free_work, + virtio_gpu_array_put_free_work); + INIT_LIST_HEAD(&vgdev->obj_free_list); + spin_lock_init(&vgdev->obj_free_lock); + #ifdef __LITTLE_ENDIAN if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL)) vgdev->has_virgl_3d = true; - DRM_INFO("virgl 3d acceleration %s\n", - vgdev->has_virgl_3d ? "enabled" : "not supported by host"); -#else - DRM_INFO("virgl 3d acceleration not supported by guest\n"); #endif if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) { vgdev->has_edid = true; - DRM_INFO("EDID support available.\n"); } + DRM_INFO("features: %cvirgl %cedid\n", + vgdev->has_virgl_3d ? '+' : '-', + vgdev->has_edid ? 
'+' : '-'); + ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL); if (ret) { DRM_ERROR("failed to find virt queues\n"); @@ -173,12 +177,6 @@ int virtio_gpu_init(struct drm_device *dev) goto err_vbufs; } - ret = virtio_gpu_ttm_init(vgdev); - if (ret) { - DRM_ERROR("failed to init ttm %d\n", ret); - goto err_ttm; - } - /* get display info */ virtio_cread(vgdev->vdev, struct virtio_gpu_config, num_scanouts, &num_scanouts); @@ -210,8 +208,6 @@ int virtio_gpu_init(struct drm_device *dev) return 0; err_scanouts: - virtio_gpu_ttm_fini(vgdev); -err_ttm: virtio_gpu_free_vbufs(vgdev); err_vbufs: vgdev->vdev->config->del_vqs(vgdev->vdev); @@ -234,6 +230,7 @@ void virtio_gpu_deinit(struct drm_device *dev) { struct virtio_gpu_device *vgdev = dev->dev_private; + flush_work(&vgdev->obj_free_work); vgdev->vqs_ready = false; flush_work(&vgdev->ctrlq.dequeue_work); flush_work(&vgdev->cursorq.dequeue_work); @@ -242,7 +239,6 @@ void virtio_gpu_deinit(struct drm_device *dev) vgdev->vdev->config->del_vqs(vgdev->vdev); virtio_gpu_modeset_fini(vgdev); - virtio_gpu_ttm_fini(vgdev); virtio_gpu_free_vbufs(vgdev); virtio_gpu_cleanup_cap_cache(vgdev); kfree(vgdev->capsets); diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c index 09b526518f5a..017a9e0fc3bb 100644 --- a/drivers/gpu/drm/virtio/virtgpu_object.c +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -23,73 +23,83 @@ * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ -#include <drm/ttm/ttm_execbuf_util.h> +#include <linux/moduleparam.h> #include "virtgpu_drv.h" +static int virtio_gpu_virglrenderer_workaround = 1; +module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400); + static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid) { -#if 0 - int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); - - if (handle < 0) - return handle; -#else - static int handle; - - /* - * FIXME: dirty hack to avoid re-using IDs, virglrenderer - * can't deal with that. Needs fixing in virglrenderer, also - * should figure a better way to handle that in the guest. - */ - handle++; -#endif - - *resid = handle + 1; + if (virtio_gpu_virglrenderer_workaround) { + /* + * Hack to avoid re-using resource IDs. + * + * virglrenderer versions up to (and including) 0.7.0 + * can't deal with that. virglrenderer commit + * "f91a9dd35715 Fix unlinking resources from hash + * table." (Feb 2019) fixes the bug. 
+ */ + static int handle; + handle++; + *resid = handle + 1; + } else { + int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); + if (handle < 0) + return handle; + *resid = handle + 1; + } return 0; } static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) { -#if 0 - ida_free(&vgdev->resource_ida, id - 1); -#endif + if (!virtio_gpu_virglrenderer_workaround) { + ida_free(&vgdev->resource_ida, id - 1); + } } -static void virtio_gpu_ttm_bo_destroy(struct ttm_buffer_object *tbo) +static void virtio_gpu_free_object(struct drm_gem_object *obj) { - struct virtio_gpu_object *bo; - struct virtio_gpu_device *vgdev; - - bo = container_of(tbo, struct virtio_gpu_object, tbo); - vgdev = (struct virtio_gpu_device *)bo->gem_base.dev->dev_private; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private; + if (bo->pages) + virtio_gpu_object_detach(vgdev, bo); if (bo->created) virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle); - if (bo->pages) - virtio_gpu_object_free_sg_table(bo); - if (bo->vmap) - virtio_gpu_object_kunmap(bo); - drm_gem_object_release(&bo->gem_base); virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); - kfree(bo); + + drm_gem_shmem_free_object(obj); } -static void virtio_gpu_init_ttm_placement(struct virtio_gpu_object *vgbo) +static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = { + .free = virtio_gpu_free_object, + .open = virtio_gpu_gem_object_open, + .close = virtio_gpu_gem_object_close, + + .print_info = drm_gem_shmem_print_info, + .pin = drm_gem_shmem_pin, + .unpin = drm_gem_shmem_unpin, + .get_sg_table = drm_gem_shmem_get_sg_table, + .vmap = drm_gem_shmem_vmap, + .vunmap = drm_gem_shmem_vunmap, + .mmap = &drm_gem_shmem_mmap, +}; + +struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, + size_t size) { - u32 c = 1; - - vgbo->placement.placement = &vgbo->placement_code; - vgbo->placement.busy_placement = &vgbo->placement_code; - vgbo->placement_code.fpfn = 0; - vgbo->placement_code.lpfn = 0; - vgbo->placement_code.flags = - TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT | - TTM_PL_FLAG_NO_EVICT; - vgbo->placement.num_placement = c; - vgbo->placement.num_busy_placement = c; + struct virtio_gpu_object *bo; + + bo = kzalloc(sizeof(*bo), GFP_KERNEL); + if (!bo) + return NULL; + bo->base.base.funcs = &virtio_gpu_gem_funcs; + return &bo->base.base; } int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, @@ -97,157 +107,59 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, struct virtio_gpu_object **bo_ptr, struct virtio_gpu_fence *fence) { + struct virtio_gpu_object_array *objs = NULL; + struct drm_gem_shmem_object *shmem_obj; struct virtio_gpu_object *bo; - size_t acc_size; int ret; *bo_ptr = NULL; - acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, params->size, - sizeof(struct virtio_gpu_object)); + params->size = roundup(params->size, PAGE_SIZE); + shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size); + if (IS_ERR(shmem_obj)) + return PTR_ERR(shmem_obj); + bo = gem_to_virtio_gpu_obj(&shmem_obj->base); - bo = kzalloc(sizeof(struct virtio_gpu_object), GFP_KERNEL); - if (bo == NULL) - return -ENOMEM; ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle); - if (ret < 0) { - kfree(bo); - return ret; - } - params->size = roundup(params->size, PAGE_SIZE); - ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, params->size); - if (ret != 0) { - virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); - kfree(bo); - return ret; - } + 
if (ret < 0) + goto err_free_gem; + bo->dumb = params->dumb; + if (fence) { + ret = -ENOMEM; + objs = virtio_gpu_array_alloc(1); + if (!objs) + goto err_put_id; + virtio_gpu_array_add_obj(objs, &bo->base.base); + + ret = virtio_gpu_array_lock_resv(objs); + if (ret != 0) + goto err_put_objs; + } + if (params->virgl) { - virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, fence); + virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, + objs, fence); } else { - virtio_gpu_cmd_create_resource(vgdev, bo, params, fence); + virtio_gpu_cmd_create_resource(vgdev, bo, params, + objs, fence); } - virtio_gpu_init_ttm_placement(bo); - ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, params->size, - ttm_bo_type_device, &bo->placement, 0, - true, acc_size, NULL, NULL, - &virtio_gpu_ttm_bo_destroy); - /* ttm_bo_init failure will call the destroy */ - if (ret != 0) + ret = virtio_gpu_object_attach(vgdev, bo, NULL); + if (ret != 0) { + virtio_gpu_free_object(&shmem_obj->base); return ret; - - if (fence) { - struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; - struct list_head validate_list; - struct ttm_validate_buffer mainbuf; - struct ww_acquire_ctx ticket; - unsigned long irq_flags; - bool signaled; - - INIT_LIST_HEAD(&validate_list); - memset(&mainbuf, 0, sizeof(struct ttm_validate_buffer)); - - /* use a gem reference since unref list undoes them */ - drm_gem_object_get(&bo->gem_base); - mainbuf.bo = &bo->tbo; - list_add(&mainbuf.head, &validate_list); - - ret = virtio_gpu_object_list_validate(&ticket, &validate_list); - if (ret == 0) { - spin_lock_irqsave(&drv->lock, irq_flags); - signaled = virtio_fence_signaled(&fence->f); - if (!signaled) - /* virtio create command still in flight */ - ttm_eu_fence_buffer_objects(&ticket, &validate_list, - &fence->f); - spin_unlock_irqrestore(&drv->lock, irq_flags); - if (signaled) - /* virtio create command finished */ - ttm_eu_backoff_reservation(&ticket, &validate_list); - } - virtio_gpu_unref_list(&validate_list); } *bo_ptr = bo; return 0; -} - -void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo) -{ - bo->vmap = NULL; - ttm_bo_kunmap(&bo->kmap); -} - -int virtio_gpu_object_kmap(struct virtio_gpu_object *bo) -{ - bool is_iomem; - int r; - - WARN_ON(bo->vmap); - - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap); - if (r) - return r; - bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem); - return 0; -} -int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev, - struct virtio_gpu_object *bo) -{ - int ret; - struct page **pages = bo->tbo.ttm->pages; - int nr_pages = bo->tbo.num_pages; - struct ttm_operation_ctx ctx = { - .interruptible = false, - .no_wait_gpu = false - }; - size_t max_segment; - - /* wtf swapping */ - if (bo->pages) - return 0; - - if (bo->tbo.ttm->state == tt_unpopulated) - bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx); - bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL); - if (!bo->pages) - goto out; - - max_segment = virtio_max_dma_size(qdev->vdev); - max_segment &= PAGE_MASK; - if (max_segment > SCATTERLIST_MAX_SEGMENT) - max_segment = SCATTERLIST_MAX_SEGMENT; - ret = __sg_alloc_table_from_pages(bo->pages, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, - max_segment, GFP_KERNEL); - if (ret) - goto out; - return 0; -out: - kfree(bo->pages); - bo->pages = NULL; - return -ENOMEM; -} - -void virtio_gpu_object_free_sg_table(struct virtio_gpu_object *bo) -{ - sg_free_table(bo->pages); - kfree(bo->pages); - bo->pages = NULL; -} - -int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool 
no_wait) -{ - int r; - - r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); - if (unlikely(r != 0)) - return r; - r = ttm_bo_wait(&bo->tbo, true, no_wait); - ttm_bo_unreserve(&bo->tbo); - return r; +err_put_objs: + virtio_gpu_array_put_free(objs); +err_put_id: + virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); +err_free_gem: + drm_gem_shmem_free_object(&shmem_obj->base); + return ret; } - diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c index a492ac3f4a7e..390524143139 100644 --- a/drivers/gpu/drm/virtio/virtgpu_plane.c +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -84,7 +84,22 @@ static const struct drm_plane_funcs virtio_gpu_plane_funcs = { static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, struct drm_plane_state *state) { - return 0; + bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR; + struct drm_crtc_state *crtc_state; + int ret; + + if (!state->fb || !state->crtc) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + ret = drm_atomic_helper_check_plane_state(state, crtc_state, + DRM_PLANE_HELPER_NO_SCALING, + DRM_PLANE_HELPER_NO_SCALING, + is_cursor, true); + return ret; } static void virtio_gpu_primary_plane_update(struct drm_plane *plane, @@ -109,12 +124,19 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane, bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); handle = bo->hw_res_handle; if (bo->dumb) { + struct virtio_gpu_object_array *objs; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return; + virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); virtio_gpu_cmd_transfer_to_host_2d - (vgdev, bo, 0, - cpu_to_le32(plane->state->src_w >> 16), - cpu_to_le32(plane->state->src_h >> 16), - cpu_to_le32(plane->state->src_x >> 16), - cpu_to_le32(plane->state->src_y >> 16), NULL); + (vgdev, 0, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16, + objs, NULL); } } else { handle = 0; @@ -186,7 +208,6 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, struct virtio_gpu_framebuffer *vgfb; struct virtio_gpu_object *bo = NULL; uint32_t handle; - int ret = 0; if (plane->state->crtc) output = drm_crtc_to_virtio_gpu_output(plane->state->crtc); @@ -205,20 +226,20 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, if (bo && bo->dumb && (plane->state->fb != old_state->fb)) { /* new cursor -- update & wait */ + struct virtio_gpu_object_array *objs; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return; + virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); virtio_gpu_cmd_transfer_to_host_2d - (vgdev, bo, 0, - cpu_to_le32(plane->state->crtc_w), - cpu_to_le32(plane->state->crtc_h), - 0, 0, vgfb->fence); - ret = virtio_gpu_object_reserve(bo, false); - if (!ret) { - dma_resv_add_excl_fence(bo->tbo.base.resv, - &vgfb->fence->f); - dma_fence_put(&vgfb->fence->f); - vgfb->fence = NULL; - virtio_gpu_object_unreserve(bo); - virtio_gpu_object_wait(bo, false); - } + (vgdev, 0, + plane->state->crtc_w, + plane->state->crtc_h, + 0, 0, objs, vgfb->fence); + dma_fence_wait(&vgfb->fence->f, true); + dma_fence_put(&vgfb->fence->f); + vgfb->fence = NULL; } if (plane->state->fb != old_state->fb) { diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c index dc642a884b88..050d24c39a8f 100644 --- a/drivers/gpu/drm/virtio/virtgpu_prime.c +++ b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -30,43 +30,9 @@ * 
device that might share buffers with virtgpu */ -struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj) -{ - struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); - - if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages) - /* should not happen */ - return ERR_PTR(-EINVAL); - - return drm_prime_pages_to_sg(bo->tbo.ttm->pages, - bo->tbo.ttm->num_pages); -} - struct drm_gem_object *virtgpu_gem_prime_import_sg_table( struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *table) { return ERR_PTR(-ENODEV); } - -void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj) -{ - struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); - int ret; - - ret = virtio_gpu_object_kmap(bo); - if (ret) - return NULL; - return bo->vmap; -} - -void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) -{ - virtio_gpu_object_kunmap(gem_to_virtio_gpu_obj(obj)); -} - -int virtgpu_gem_prime_mmap(struct drm_gem_object *obj, - struct vm_area_struct *vma) -{ - return drm_gem_prime_mmap(obj, vma); -} diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c deleted file mode 100644 index f87903641847..000000000000 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ /dev/null @@ -1,305 +0,0 @@ -/* - * Copyright (C) 2015 Red Hat, Inc. - * All Rights Reserved. - * - * Authors: - * Dave Airlie - * Alon Levy - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include <linux/delay.h> - -#include <drm/drm.h> -#include <drm/drm_file.h> -#include <drm/ttm/ttm_bo_api.h> -#include <drm/ttm/ttm_bo_driver.h> -#include <drm/ttm/ttm_module.h> -#include <drm/ttm/ttm_page_alloc.h> -#include <drm/ttm/ttm_placement.h> -#include <drm/virtgpu_drm.h> - -#include "virtgpu_drv.h" - -static struct -virtio_gpu_device *virtio_gpu_get_vgdev(struct ttm_bo_device *bdev) -{ - struct virtio_gpu_mman *mman; - struct virtio_gpu_device *vgdev; - - mman = container_of(bdev, struct virtio_gpu_mman, bdev); - vgdev = container_of(mman, struct virtio_gpu_device, mman); - return vgdev; -} - -int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *file_priv; - struct virtio_gpu_device *vgdev; - int r; - - file_priv = filp->private_data; - vgdev = file_priv->minor->dev->dev_private; - if (vgdev == NULL) { - DRM_ERROR( - "filp->private_data->minor->dev->dev_private == NULL\n"); - return -EINVAL; - } - r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev); - - return r; -} - -static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev, - uint32_t flags) -{ - return 0; -} - -static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, - struct ttm_buffer_object *bo, - const struct ttm_place *place, - struct ttm_mem_reg *mem) -{ - mem->mm_node = (void *)1; - return 0; -} - -static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man, - struct ttm_mem_reg *mem) -{ - mem->mm_node = (void *)NULL; -} - -static int ttm_bo_man_init(struct ttm_mem_type_manager *man, - unsigned long p_size) -{ - return 0; -} - -static int ttm_bo_man_takedown(struct ttm_mem_type_manager *man) -{ - return 0; -} - -static void ttm_bo_man_debug(struct ttm_mem_type_manager *man, - struct drm_printer *printer) -{ -} - -static const struct ttm_mem_type_manager_func virtio_gpu_bo_manager_func = { - .init = ttm_bo_man_init, - .takedown = ttm_bo_man_takedown, - .get_node = ttm_bo_man_get_node, - .put_node = ttm_bo_man_put_node, - .debug = ttm_bo_man_debug -}; - -static int virtio_gpu_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, - struct ttm_mem_type_manager *man) -{ - switch (type) { - case TTM_PL_SYSTEM: - /* System memory */ - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - break; - case TTM_PL_TT: - man->func = &virtio_gpu_bo_manager_func; - man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_MASK_CACHING; - man->default_caching = TTM_PL_FLAG_CACHED; - break; - default: - DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type); - return -EINVAL; - } - return 0; -} - -static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo, - struct ttm_placement *placement) -{ - static const struct ttm_place placements = { - .fpfn = 0, - .lpfn = 0, - .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM, - }; - - placement->placement = &placements; - placement->busy_placement = &placements; - placement->num_placement = 1; - placement->num_busy_placement = 1; -} - -static int virtio_gpu_verify_access(struct ttm_buffer_object *bo, - struct file *filp) -{ - return 0; -} - -static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) -{ - struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; - - mem->bus.addr = NULL; - mem->bus.offset = 0; - mem->bus.size = mem->num_pages << PAGE_SHIFT; - mem->bus.base = 0; - mem->bus.is_iomem = false; - if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) - return -EINVAL; - switch 
(mem->mem_type) { - case TTM_PL_SYSTEM: - case TTM_PL_TT: - /* system memory */ - return 0; - default: - return -EINVAL; - } - return 0; -} - -static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev, - struct ttm_mem_reg *mem) -{ -} - -/* - * TTM backend functions. - */ -struct virtio_gpu_ttm_tt { - struct ttm_dma_tt ttm; - struct virtio_gpu_object *obj; -}; - -static int virtio_gpu_ttm_tt_bind(struct ttm_tt *ttm, - struct ttm_mem_reg *bo_mem) -{ - struct virtio_gpu_ttm_tt *gtt = - container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm); - struct virtio_gpu_device *vgdev = - virtio_gpu_get_vgdev(gtt->obj->tbo.bdev); - - virtio_gpu_object_attach(vgdev, gtt->obj, NULL); - return 0; -} - -static int virtio_gpu_ttm_tt_unbind(struct ttm_tt *ttm) -{ - struct virtio_gpu_ttm_tt *gtt = - container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm); - struct virtio_gpu_device *vgdev = - virtio_gpu_get_vgdev(gtt->obj->tbo.bdev); - - virtio_gpu_object_detach(vgdev, gtt->obj); - return 0; -} - -static void virtio_gpu_ttm_tt_destroy(struct ttm_tt *ttm) -{ - struct virtio_gpu_ttm_tt *gtt = - container_of(ttm, struct virtio_gpu_ttm_tt, ttm.ttm); - - ttm_dma_tt_fini(&gtt->ttm); - kfree(gtt); -} - -static struct ttm_backend_func virtio_gpu_tt_func = { - .bind = &virtio_gpu_ttm_tt_bind, - .unbind = &virtio_gpu_ttm_tt_unbind, - .destroy = &virtio_gpu_ttm_tt_destroy, -}; - -static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo, - uint32_t page_flags) -{ - struct virtio_gpu_device *vgdev; - struct virtio_gpu_ttm_tt *gtt; - - vgdev = virtio_gpu_get_vgdev(bo->bdev); - gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL); - if (gtt == NULL) - return NULL; - gtt->ttm.ttm.func = &virtio_gpu_tt_func; - gtt->obj = container_of(bo, struct virtio_gpu_object, tbo); - if (ttm_dma_tt_init(&gtt->ttm, bo, page_flags)) { - kfree(gtt); - return NULL; - } - return &gtt->ttm.ttm; -} - -static void virtio_gpu_bo_swap_notify(struct ttm_buffer_object *tbo) -{ - struct virtio_gpu_object *bo; - - bo = container_of(tbo, struct virtio_gpu_object, tbo); - - if (bo->pages) - virtio_gpu_object_free_sg_table(bo); -} - -static struct ttm_bo_driver virtio_gpu_bo_driver = { - .ttm_tt_create = &virtio_gpu_ttm_tt_create, - .invalidate_caches = &virtio_gpu_invalidate_caches, - .init_mem_type = &virtio_gpu_init_mem_type, - .eviction_valuable = ttm_bo_eviction_valuable, - .evict_flags = &virtio_gpu_evict_flags, - .verify_access = &virtio_gpu_verify_access, - .io_mem_reserve = &virtio_gpu_ttm_io_mem_reserve, - .io_mem_free = &virtio_gpu_ttm_io_mem_free, - .swap_notify = &virtio_gpu_bo_swap_notify, -}; - -int virtio_gpu_ttm_init(struct virtio_gpu_device *vgdev) -{ - int r; - - /* No others user of address space so set it to 0 */ - r = ttm_bo_device_init(&vgdev->mman.bdev, - &virtio_gpu_bo_driver, - vgdev->ddev->anon_inode->i_mapping, - false); - if (r) { - DRM_ERROR("failed initializing buffer object driver(%d).\n", r); - goto err_dev_init; - } - - r = ttm_bo_init_mm(&vgdev->mman.bdev, TTM_PL_TT, 0); - if (r) { - DRM_ERROR("Failed initializing GTT heap.\n"); - goto err_mm_init; - } - return 0; - -err_mm_init: - ttm_bo_device_release(&vgdev->mman.bdev); -err_dev_init: - return r; -} - -void virtio_gpu_ttm_fini(struct virtio_gpu_device *vgdev) -{ - ttm_bo_device_release(&vgdev->mman.bdev); - DRM_INFO("virtio_gpu: ttm finalized\n"); -} diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index 7ac20490e1b4..74ad3bc3ebe8 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ 
b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -155,7 +155,7 @@ static void free_vbuf(struct virtio_gpu_device *vgdev, { if (vbuf->resp_size > MAX_INLINE_RESP_SIZE) kfree(vbuf->resp_buf); - kfree(vbuf->data_buf); + kvfree(vbuf->data_buf); kmem_cache_free(vgdev->vbufs, vbuf); } @@ -192,7 +192,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work) } while (!virtqueue_enable_cb(vgdev->ctrlq.vq)); spin_unlock(&vgdev->ctrlq.qlock); - list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { + list_for_each_entry(entry, &reclaim_list, list) { resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf; trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp); @@ -219,14 +219,18 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work) } if (entry->resp_cb) entry->resp_cb(vgdev, entry); - - list_del(&entry->list); - free_vbuf(vgdev, entry); } wake_up(&vgdev->ctrlq.ack_queue); if (fence_id) virtio_gpu_fence_event_process(vgdev, fence_id); + + list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { + if (entry->objs) + virtio_gpu_array_put_free_delayed(vgdev, entry->objs); + list_del(&entry->list); + free_vbuf(vgdev, entry); + } } void virtio_gpu_dequeue_cursor_func(struct work_struct *work) @@ -252,26 +256,67 @@ void virtio_gpu_dequeue_cursor_func(struct work_struct *work) wake_up(&vgdev->cursorq.ack_queue); } -static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) +/* Create sg_table from a vmalloc'd buffer. */ +static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents) +{ + int ret, s, i; + struct sg_table *sgt; + struct scatterlist *sg; + struct page *pg; + + if (WARN_ON(!PAGE_ALIGNED(data))) + return NULL; + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return NULL; + + *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE); + ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL); + if (ret) { + kfree(sgt); + return NULL; + } + + for_each_sg(sgt->sgl, sg, *sg_ents, i) { + pg = vmalloc_to_page(data); + if (!pg) { + sg_free_table(sgt); + kfree(sgt); + return NULL; + } + + s = min_t(int, PAGE_SIZE, size); + sg_set_page(sg, pg, s, 0); + + size -= s; + data += s; + } + + return sgt; +} + +static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct scatterlist *vout) __releases(&vgdev->ctrlq.qlock) __acquires(&vgdev->ctrlq.qlock) { struct virtqueue *vq = vgdev->ctrlq.vq; - struct scatterlist *sgs[3], vcmd, vout, vresp; + struct scatterlist *sgs[3], vcmd, vresp; int outcnt = 0, incnt = 0; + bool notify = false; int ret; if (!vgdev->vqs_ready) - return -ENODEV; + return notify; sg_init_one(&vcmd, vbuf->buf, vbuf->size); sgs[outcnt + incnt] = &vcmd; outcnt++; - if (vbuf->data_size) { - sg_init_one(&vout, vbuf->data_buf, vbuf->data_size); - sgs[outcnt + incnt] = &vout; + if (vout) { + sgs[outcnt + incnt] = vout; outcnt++; } @@ -292,32 +337,35 @@ retry: trace_virtio_gpu_cmd_queue(vq, (struct virtio_gpu_ctrl_hdr *)vbuf->buf); - virtqueue_kick(vq); + notify = virtqueue_kick_prepare(vq); } - - if (!ret) - ret = vq->num_free; - return ret; -} - -static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) -{ - int rc; - - spin_lock(&vgdev->ctrlq.qlock); - rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); - spin_unlock(&vgdev->ctrlq.qlock); - return rc; + return notify; } -static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf, - struct virtio_gpu_ctrl_hdr *hdr, 
- struct virtio_gpu_fence *fence) +static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct virtio_gpu_ctrl_hdr *hdr, + struct virtio_gpu_fence *fence) { struct virtqueue *vq = vgdev->ctrlq.vq; - int rc; + struct scatterlist *vout = NULL, sg; + struct sg_table *sgt = NULL; + bool notify; + int outcnt = 0; + + if (vbuf->data_size) { + if (is_vmalloc_addr(vbuf->data_buf)) { + sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size, + &outcnt); + if (!sgt) + return; + vout = sgt->sgl; + } else { + sg_init_one(&sg, vbuf->data_buf, vbuf->data_size); + vout = &sg; + outcnt = 1; + } + } again: spin_lock(&vgdev->ctrlq.qlock); @@ -330,29 +378,47 @@ again: * to wait for free space, which can result in fence ids being * submitted out-of-order. */ - if (vq->num_free < 3) { + if (vq->num_free < 2 + outcnt) { spin_unlock(&vgdev->ctrlq.qlock); wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3); goto again; } - if (fence) + if (hdr && fence) { virtio_gpu_fence_emit(vgdev, hdr, fence); - rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf); + if (vbuf->objs) { + virtio_gpu_array_add_fence(vbuf->objs, &fence->f); + virtio_gpu_array_unlock_resv(vbuf->objs); + } + } + notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout); spin_unlock(&vgdev->ctrlq.qlock); - return rc; + if (notify) + virtqueue_notify(vgdev->ctrlq.vq); + + if (sgt) { + sg_free_table(sgt); + kfree(sgt); + } +} + +static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL); } -static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, - struct virtio_gpu_vbuffer *vbuf) +static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) { struct virtqueue *vq = vgdev->cursorq.vq; struct scatterlist *sgs[1], ccmd; + bool notify; int ret; int outcnt; if (!vgdev->vqs_ready) - return -ENODEV; + return; sg_init_one(&ccmd, vbuf->buf, vbuf->size); sgs[0] = &ccmd; @@ -370,14 +436,13 @@ retry: trace_virtio_gpu_cmd_queue(vq, (struct virtio_gpu_ctrl_hdr *)vbuf->buf); - virtqueue_kick(vq); + notify = virtqueue_kick_prepare(vq); } spin_unlock(&vgdev->cursorq.qlock); - if (!ret) - ret = vq->num_free; - return ret; + if (notify) + virtqueue_notify(vq); } /* just create gem objects for userspace and long lived objects, @@ -388,6 +453,7 @@ retry: void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo, struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence) { struct virtio_gpu_resource_create_2d *cmd_p; @@ -395,6 +461,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D); cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); @@ -481,12 +548,13 @@ void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, } void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *bo, uint64_t offset, - __le32 width, __le32 height, - __le32 x, __le32 y, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_to_host_2d *cmd_p; 
struct virtio_gpu_vbuffer *vbuf; bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); @@ -498,14 +566,15 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D); cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); cmd_p->offset = cpu_to_le64(offset); - cmd_p->r.width = width; - cmd_p->r.height = height; - cmd_p->r.x = x; - cmd_p->r.y = y; + cmd_p->r.width = cpu_to_le32(width); + cmd_p->r.height = cpu_to_le32(height); + cmd_p->r.x = cpu_to_le32(x); + cmd_p->r.y = cpu_to_le32(y); virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence); } @@ -826,34 +895,38 @@ void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev, void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev, uint32_t ctx_id, - uint32_t resource_id) + struct virtio_gpu_object_array *objs) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_ctx_resource *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); - cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); } void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev, uint32_t ctx_id, - uint32_t resource_id) + struct virtio_gpu_object_array *objs) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_ctx_resource *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); - cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); } @@ -861,6 +934,7 @@ void virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, struct virtio_gpu_object *bo, struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence) { struct virtio_gpu_resource_create_3d *cmd_p; @@ -868,6 +942,7 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D); cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); @@ -888,12 +963,13 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, } void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, - struct virtio_gpu_object *bo, uint32_t ctx_id, uint64_t offset, uint32_t level, struct virtio_gpu_box *box, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_host_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); @@ -906,6 +982,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + 
vbuf->objs = objs; + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); @@ -917,20 +995,24 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, } void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, - uint32_t resource_id, uint32_t ctx_id, + uint32_t ctx_id, uint64_t offset, uint32_t level, struct virtio_gpu_box *box, + struct virtio_gpu_object_array *objs, struct virtio_gpu_fence *fence) { + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); struct virtio_gpu_transfer_host_3d *cmd_p; struct virtio_gpu_vbuffer *vbuf; cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); - cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); cmd_p->box = *box; cmd_p->offset = cpu_to_le64(offset); cmd_p->level = cpu_to_le32(level); @@ -940,7 +1022,9 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, void *data, uint32_t data_size, - uint32_t ctx_id, struct virtio_gpu_fence *fence) + uint32_t ctx_id, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) { struct virtio_gpu_cmd_submit *cmd_p; struct virtio_gpu_vbuffer *vbuf; @@ -950,6 +1034,7 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, vbuf->data_buf = data; vbuf->data_size = data_size; + vbuf->objs = objs; cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D); cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); @@ -965,17 +1050,21 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); struct virtio_gpu_mem_entry *ents; struct scatterlist *sg; - int si, nents; + int si, nents, ret; if (WARN_ON_ONCE(!obj->created)) return -EINVAL; + if (WARN_ON_ONCE(obj->pages)) + return -EINVAL; - if (!obj->pages) { - int ret; + ret = drm_gem_shmem_pin(&obj->base.base); + if (ret < 0) + return -EINVAL; - ret = virtio_gpu_object_get_sg_table(vgdev, obj); - if (ret) - return ret; + obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base); + if (obj->pages == NULL) { + drm_gem_shmem_unpin(&obj->base.base); + return -EINVAL; } if (use_dma_api) { @@ -1014,6 +1103,9 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev, { bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev); + if (WARN_ON_ONCE(!obj->pages)) + return; + if (use_dma_api && obj->mapped) { struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev); /* detach backing and wait for the host process it ... 
*/ @@ -1029,6 +1121,11 @@ void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev, } else { virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL); } + + sg_free_table(obj->pages); + obj->pages = NULL; + + drm_gem_shmem_unpin(&obj->base.base); } void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev, diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c index 927dafaebc76..74f703b8d22a 100644 --- a/drivers/gpu/drm/vkms/vkms_crtc.c +++ b/drivers/gpu/drm/vkms/vkms_crtc.c @@ -16,17 +16,18 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) u64 ret_overrun; bool ret; - spin_lock(&output->lock); - ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, output->period_ns); WARN_ON(ret_overrun != 1); + spin_lock(&output->lock); ret = drm_crtc_handle_vblank(crtc); if (!ret) DRM_ERROR("vkms failure on handling vblank"); state = output->composer_state; + spin_unlock(&output->lock); + if (state && output->composer_enabled) { u64 frame = drm_crtc_accurate_vblank_count(crtc); @@ -48,8 +49,6 @@ static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer) DRM_DEBUG_DRIVER("Composer worker already queued\n"); } - spin_unlock(&output->lock); - return HRTIMER_RESTART; } @@ -85,7 +84,7 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe, struct vkms_output *output = &vkmsdev->output; struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; - *vblank_time = output->vblank_hrtimer.node.expires; + *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires); if (WARN_ON(*vblank_time == vblank->time)) return true; diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c index 44ab9f8ef8be..d1fe144aa289 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.c +++ b/drivers/gpu/drm/vkms/vkms_drv.c @@ -11,13 +11,14 @@ #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/dma-mapping.h> +#include <drm/drm_gem.h> #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_drv.h> #include <drm/drm_fb_helper.h> #include <drm/drm_file.h> -#include <drm/drm_gem.h> #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_ioctl.h> #include <drm/drm_probe_helper.h> @@ -83,7 +84,7 @@ static void vkms_atomic_commit_tail(struct drm_atomic_state *old_state) drm_atomic_helper_commit_hw_done(old_state); - drm_atomic_helper_wait_for_vblanks(dev, old_state); + drm_atomic_helper_wait_for_flip_done(dev, old_state); for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { struct vkms_crtc_state *vkms_state = @@ -103,6 +104,8 @@ static struct drm_driver vkms_driver = { .gem_vm_ops = &vkms_gem_vm_ops, .gem_free_object_unlocked = vkms_gem_free_object, .get_vblank_timestamp = vkms_get_vblank_timestamp, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import_sg_table = vkms_prime_import_sg_table, .name = DRIVER_NAME, .desc = DRIVER_DESC, @@ -157,6 +160,14 @@ static int __init vkms_init(void) if (ret) goto out_unregister; + ret = dma_coerce_mask_and_coherent(vkms_device->drm.dev, + DMA_BIT_MASK(64)); + + if (ret) { + DRM_ERROR("Could not initialize DMA support\n"); + goto out_fini; + } + vkms_device->drm.irq_enabled = true; ret = drm_vblank_init(&vkms_device->drm, 1); diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h index 5a95100fa18b..7d52e24564db 100644 --- a/drivers/gpu/drm/vkms/vkms_drv.h +++ b/drivers/gpu/drm/vkms/vkms_drv.h @@ -137,6 +137,12 @@ int vkms_gem_vmap(struct drm_gem_object *obj); void 
vkms_gem_vunmap(struct drm_gem_object *obj); +/* Prime */ +struct drm_gem_object * +vkms_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sg); + /* CRC Support */ const char *const *vkms_get_crc_sources(struct drm_crtc *crtc, size_t *count); diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c index 6489bfe0a149..2e01186fb943 100644 --- a/drivers/gpu/drm/vkms/vkms_gem.c +++ b/drivers/gpu/drm/vkms/vkms_gem.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: GPL-2.0+ +#include <linux/dma-buf.h> #include <linux/shmem_fs.h> #include <linux/vmalloc.h> +#include <drm/drm_prime.h> #include "vkms_drv.h" @@ -218,3 +220,28 @@ out: mutex_unlock(&vkms_obj->pages_lock); return ret; } + +struct drm_gem_object * +vkms_prime_import_sg_table(struct drm_device *dev, + struct dma_buf_attachment *attach, + struct sg_table *sg) +{ + struct vkms_gem_object *obj; + int npages; + + obj = __vkms_gem_create(dev, attach->dmabuf->size); + if (IS_ERR(obj)) + return ERR_CAST(obj); + + npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE; + DRM_DEBUG_PRIME("Importing %d pages\n", npages); + + obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); + if (!obj->pages) { + vkms_gem_free_object(&obj->gem); + return ERR_PTR(-ENOMEM); + } + + drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages); + return &obj->gem; +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index aad8d8140259..74016a08d118 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -566,7 +566,7 @@ static void vmw_user_bo_ref_obj_release(struct ttm_base_object *base, switch (ref_type) { case TTM_REF_SYNCCPU_WRITE: - ttm_bo_synccpu_write_release(&user_bo->vbo.base); + atomic_dec(&user_bo->vbo.cpu_writers); break; default: WARN_ONCE(true, "Undefined buffer object reference release.\n"); @@ -682,12 +682,12 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, struct ttm_object_file *tfile, uint32_t flags) { + bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); struct ttm_buffer_object *bo = &user_bo->vbo.base; bool existed; int ret; if (flags & drm_vmw_synccpu_allow_cs) { - bool nonblock = !!(flags & drm_vmw_synccpu_dontblock); long lret; lret = dma_resv_wait_timeout_rcu @@ -700,15 +700,22 @@ static int vmw_user_bo_synccpu_grab(struct vmw_user_buffer_object *user_bo, return 0; } - ret = ttm_bo_synccpu_write_grab - (bo, !!(flags & drm_vmw_synccpu_dontblock)); + ret = ttm_bo_reserve(bo, true, nonblock, NULL); + if (unlikely(ret != 0)) + return ret; + + ret = ttm_bo_wait(bo, true, nonblock); + if (likely(ret == 0)) + atomic_inc(&user_bo->vbo.cpu_writers); + + ttm_bo_unreserve(bo); if (unlikely(ret != 0)) return ret; ret = ttm_ref_object_add(tfile, &user_bo->prime.base, TTM_REF_SYNCCPU_WRITE, &existed, false); if (ret != 0 || existed) - ttm_bo_synccpu_write_release(&user_bo->vbo.base); + atomic_dec(&user_bo->vbo.cpu_writers); return ret; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index b38bcb032c99..e962048f65d2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -576,8 +576,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv) else dev_priv->map_mode = vmw_dma_map_populate; - /* No TTM coherent page pool? FIXME: Ask TTM instead! 
*/ - if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) && + if (!IS_ENABLED(CONFIG_DRM_TTM_DMA_PAGE_POOL) && (dev_priv->map_mode == vmw_dma_alloc_coherent)) return -EINVAL; @@ -827,9 +826,13 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_no_fman; } + drm_vma_offset_manager_init(&dev_priv->vma_manager, + DRM_FILE_PAGE_OFFSET_START, + DRM_FILE_PAGE_OFFSET_SIZE); ret = ttm_bo_device_init(&dev_priv->bdev, &vmw_bo_driver, dev->anon_inode->i_mapping, + &dev_priv->vma_manager, false); if (unlikely(ret != 0)) { DRM_ERROR("Failed initializing TTM buffer object driver.\n"); @@ -986,6 +989,7 @@ static void vmw_driver_unload(struct drm_device *dev) if (dev_priv->has_mob) (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); (void) ttm_bo_device_release(&dev_priv->bdev); + drm_vma_offset_manager_destroy(&dev_priv->vma_manager); vmw_release_device_late(dev_priv); vmw_fence_manager_takedown(dev_priv->fman); if (dev_priv->capabilities & SVGA_CAP_IRQMASK) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index 5eb73ded8e07..b18842f73081 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -102,6 +102,8 @@ struct vmw_fpriv { * @base: The TTM buffer object * @res_list: List of resources using this buffer object as a backing MOB * @pin_count: pin depth + * @cpu_writers: Number of synccpu write grabs. Protected by reservation when + * increased. May be decreased without reservation. * @dx_query_ctx: DX context if this buffer object is used as a DX query MOB * @map: Kmap object for semi-persistent mappings * @res_prios: Eviction priority counts for attached resources @@ -110,6 +112,7 @@ struct vmw_buffer_object { struct ttm_buffer_object base; struct list_head res_list; s32 pin_count; + atomic_t cpu_writers; /* Not ref-counted. 
Protected by binding_mutex */ struct vmw_resource *dx_query_ctx; /* Protected by reservation */ @@ -438,6 +441,7 @@ struct vmw_private { struct vmw_fifo_state fifo; struct drm_device *dev; + struct drm_vma_offset_manager vma_manager; unsigned long vmw_chipset; unsigned int io_start; uint32_t vram_start; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index 5581a7826b4c..6dfe36fb817c 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -492,8 +492,7 @@ vmw_resource_check_buffer(struct ww_acquire_ctx *ticket, val_buf->bo = &res->backup->base; val_buf->num_shared = 0; list_add_tail(&val_buf->head, &val_list); - ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL, - true); + ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL); if (unlikely(ret != 0)) goto out_no_reserve; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 29d8794f0421..de0530b4dc1b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -336,7 +336,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) { struct vmw_private *dev_priv = res->dev_priv; - struct vmw_surface *srf; void *cmd; if (res->func->destroy == vmw_gb_surface_destroy) { @@ -360,7 +359,6 @@ static void vmw_hw_surface_destroy(struct vmw_resource *res) */ mutex_lock(&dev_priv->cmdbuf_mutex); - srf = vmw_res_to_srf(res); dev_priv->used_memory_size -= res->backup_size; mutex_unlock(&dev_priv->cmdbuf_mutex); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index f611b2290a1b..7bff3628fc54 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -521,6 +521,9 @@ int vmw_validation_bo_validate_single(struct ttm_buffer_object *bo, }; int ret; + if (atomic_read(&vbo->cpu_writers)) + return -EBUSY; + if (vbo->pin_count > 0) return 0; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h index 0e063743dd86..71ce4b318850 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h @@ -170,7 +170,7 @@ vmw_validation_bo_reserve(struct vmw_validation_context *ctx, bool intr) { return ttm_eu_reserve_buffers(&ctx->ticket, &ctx->bo_list, intr, - NULL, true); + NULL); } /** diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c index 21ad1c359b61..ff506bc99414 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_kms.c +++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c @@ -270,11 +270,12 @@ static void display_update(struct drm_simple_display_pipe *pipe, } static enum drm_mode_status -display_mode_valid(struct drm_crtc *crtc, const struct drm_display_mode *mode) +display_mode_valid(struct drm_simple_display_pipe *pipe, + const struct drm_display_mode *mode) { struct xen_drm_front_drm_pipeline *pipeline = - container_of(crtc, struct xen_drm_front_drm_pipeline, - pipe.crtc); + container_of(pipe, struct xen_drm_front_drm_pipeline, + pipe); if (mode->hdisplay != pipeline->width) return MODE_ERROR; |