Diffstat (limited to 'drivers/gpu')
348 files changed, 9711 insertions, 2979 deletions
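Before the patch body, note a pattern that recurs throughout: the GPU clock counter and SE/SH selection callbacks move out of the per-ASIC table (struct amdgpu_asic_funcs) into a new per-IP table, struct amdgpu_gfx_funcs, reached via adev->gfx.funcs. A minimal sketch of the resulting call path (illustrative only; query_gpu_clock() is a hypothetical helper, and the extra NULL check on the table itself is an assumption beyond the amdgpu_amdkfd.c hunk below, which only checks the callback):

static uint64_t query_gpu_clock(struct amdgpu_device *adev)
{
	/* hypothetical caller: go through the new GFX function table rather
	 * than adev->asic_funcs; check the callback as amdgpu_amdkfd.c does */
	if (adev->gfx.funcs && adev->gfx.funcs->get_gpu_clock_counter)
		return adev->gfx.funcs->get_gpu_clock_counter(adev);
	return 0;
}
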
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index da3d02154fa6..eb09037a7161 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -85,8 +85,12 @@ extern int amdgpu_vm_debug;
 extern int amdgpu_sched_jobs;
 extern int amdgpu_sched_hw_submission;
 extern int amdgpu_powerplay;
+extern int amdgpu_powercontainment;
 extern unsigned amdgpu_pcie_gen_cap;
 extern unsigned amdgpu_pcie_lane_cap;
+extern unsigned amdgpu_cg_mask;
+extern unsigned amdgpu_pg_mask;
+extern char *amdgpu_disable_cu;
 
 #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS		3000
 #define AMDGPU_MAX_USEC_TIMEOUT			100000	/* 100 ms */
@@ -183,6 +187,10 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
 int amdgpu_set_powergating_state(struct amdgpu_device *adev,
 				 enum amd_ip_block_type block_type,
 				 enum amd_powergating_state state);
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+			 enum amd_ip_block_type block_type);
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+		    enum amd_ip_block_type block_type);
 
 struct amdgpu_ip_block_version {
 	enum amd_ip_block_type type;
@@ -594,11 +602,9 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
 		     struct reservation_object *resv,
 		     void *owner);
-bool amdgpu_sync_is_idle(struct amdgpu_sync *sync);
-int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src,
-			     struct fence *fence);
+struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
+				     struct amdgpu_ring *ring);
 struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync);
-int amdgpu_sync_wait(struct amdgpu_sync *sync);
 void amdgpu_sync_free(struct amdgpu_sync *sync);
 int amdgpu_sync_init(void);
 void amdgpu_sync_fini(void);
@@ -754,12 +760,11 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 			     struct amdgpu_job **job);
 
+void amdgpu_job_free_resources(struct amdgpu_job *job);
 void amdgpu_job_free(struct amdgpu_job *job);
-void amdgpu_job_free_func(struct kref *refcount);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
 		      struct fence **f);
-void amdgpu_job_timeout_func(struct work_struct *work);
 
 struct amdgpu_ring {
 	struct amdgpu_device		*adev;
@@ -771,8 +776,6 @@ struct amdgpu_ring {
 	struct amdgpu_bo	*ring_obj;
 	volatile uint32_t	*ring;
 	unsigned		rptr_offs;
-	u64			next_rptr_gpu_addr;
-	volatile u32		*next_rptr_cpu_addr;
 	unsigned		wptr;
 	unsigned		wptr_old;
 	unsigned		ring_size;
@@ -791,7 +794,6 @@ struct amdgpu_ring {
 	u32			doorbell_index;
 	bool			use_doorbell;
 	unsigned		wptr_offs;
-	unsigned		next_rptr_offs;
 	unsigned		fence_offs;
 	uint64_t		current_ctx;
 	enum amdgpu_ring_type	type;
@@ -799,6 +801,9 @@ struct amdgpu_ring {
 	unsigned		cond_exe_offs;
 	u64			cond_exe_gpu_addr;
 	volatile u32		*cond_exe_cpu_addr;
+#if defined(CONFIG_DEBUG_FS)
+	struct dentry *ent;
+#endif
 };
 
 /*
@@ -861,6 +866,7 @@ struct amdgpu_vm {
 	struct amdgpu_bo	*page_directory;
 	unsigned		max_pde_used;
 	struct fence		*page_directory_fence;
+	uint64_t		last_eviction_counter;
 
 	/* array of page tables, one for each page directory entry */
 	struct amdgpu_vm_pt	*page_tables;
@@ -883,13 +889,14 @@ struct amdgpu_vm_id {
 	struct fence		*first;
 	struct amdgpu_sync	active;
 	struct fence		*last_flush;
-	struct amdgpu_ring	*last_user;
 	atomic64_t		owner;
 
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
 	struct fence		*flushed_updates;
 
+	uint32_t		current_gpu_reset_count;
+
 	uint32_t		gds_base;
 	uint32_t		gds_size;
 	uint32_t		gws_base;
@@ -905,6 +912,10 @@ struct amdgpu_vm_manager {
 	struct list_head			ids_lru;
 	struct amdgpu_vm_id			ids[AMDGPU_NUM_VM];
 
+	/* Handling of VM fences */
+	u64					fence_context;
+	unsigned				seqno[AMDGPU_MAX_RINGS];
+
 	uint32_t				max_pfn;
 	/* vram base address for page table entry */
 	u64					vram_base_offset;
@@ -926,17 +937,14 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
 void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
 			 struct list_head *validated,
 			 struct amdgpu_bo_list_entry *entry);
-void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates);
+void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			  struct list_head *duplicates);
 void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev,
 				  struct amdgpu_vm *vm);
 int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
 		      struct amdgpu_sync *sync, struct fence *fence,
-		      unsigned *vm_id, uint64_t *vm_pd_addr);
-int amdgpu_vm_flush(struct amdgpu_ring *ring,
-		    unsigned vm_id, uint64_t pd_addr,
-		    uint32_t gds_base, uint32_t gds_size,
-		    uint32_t gws_base, uint32_t gws_size,
-		    uint32_t oa_base, uint32_t oa_size);
+		      struct amdgpu_job *job);
+int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job);
 void amdgpu_vm_reset_id(struct amdgpu_device *adev, unsigned vm_id);
 uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr);
 int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
@@ -1142,6 +1150,12 @@ struct amdgpu_cu_info {
 	uint32_t bitmap[4][4];
 };
 
+struct amdgpu_gfx_funcs {
+	/* get the gpu clock counter */
+	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
+	void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance);
+};
+
 struct amdgpu_gfx {
 	struct mutex			gpu_clock_mutex;
 	struct amdgpu_gca_config	config;
@@ -1178,6 +1192,7 @@ struct amdgpu_gfx {
 	/* ce ram size*/
 	unsigned			ce_ram_size;
 	struct amdgpu_cu_info		cu_info;
+	const struct amdgpu_gfx_funcs	*funcs;
 };
 
 int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm,
@@ -1195,10 +1210,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
 void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
 void amdgpu_ring_commit(struct amdgpu_ring *ring);
 void amdgpu_ring_undo(struct amdgpu_ring *ring);
-unsigned amdgpu_ring_backup(struct amdgpu_ring *ring,
-			    uint32_t **data);
-int amdgpu_ring_restore(struct amdgpu_ring *ring,
-			unsigned size, uint32_t *data);
 int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
 		     unsigned ring_size, u32 nop, u32 align_mask,
 		     struct amdgpu_irq_src *irq_src, unsigned irq_type,
@@ -1250,6 +1261,7 @@ struct amdgpu_job {
 	uint32_t		num_ibs;
 	void			*owner;
 	uint64_t		ctx;
+	bool			vm_needs_flush;
 	unsigned		vm_id;
 	uint64_t		vm_pd_addr;
 	uint32_t		gds_base, gds_size;
@@ -1257,8 +1269,7 @@ struct amdgpu_job {
 	uint32_t		oa_base, oa_size;
 
 	/* user fence handling */
-	struct amdgpu_bo	*uf_bo;
-	uint32_t		uf_offset;
+	uint64_t		uf_addr;
 	uint64_t		uf_sequence;
 };
 
@@ -1560,6 +1571,12 @@ struct amdgpu_dpm_funcs {
 	u32 (*get_fan_control_mode)(struct amdgpu_device *adev);
 	int (*set_fan_speed_percent)(struct amdgpu_device *adev, u32 speed);
 	int (*get_fan_speed_percent)(struct amdgpu_device *adev, u32 *speed);
+	int (*force_clock_level)(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t mask);
+	int (*print_clock_levels)(struct amdgpu_device *adev, enum pp_clock_type type, char *buf);
+	int (*get_sclk_od)(struct amdgpu_device *adev);
+	int (*set_sclk_od)(struct amdgpu_device *adev, uint32_t value);
+	int (*get_mclk_od)(struct amdgpu_device *adev);
+	int (*set_mclk_od)(struct amdgpu_device *adev, uint32_t value);
 };
 
 struct amdgpu_dpm {
@@ -1767,6 +1784,8 @@ int amdgpu_debugfs_init(struct drm_minor *minor);
 void amdgpu_debugfs_cleanup(struct drm_minor *minor);
 #endif
 
+int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
+
 /*
  * amdgpu smumgr functions
  */
@@ -1811,15 +1830,13 @@ struct amdgpu_asic_funcs {
 			     u32 sh_num, u32 reg_offset, u32 *value);
 	void (*set_vga_state)(struct amdgpu_device *adev, bool state);
 	int (*reset)(struct amdgpu_device *adev);
-	/* wait for mc_idle */
-	int (*wait_for_mc_idle)(struct amdgpu_device *adev);
 	/* get the reference clock */
 	u32 (*get_xclk)(struct amdgpu_device *adev);
-	/* get the gpu clock counter */
-	uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev);
 	/* MM block clocks */
 	int (*set_uvd_clocks)(struct amdgpu_device *adev, u32 vclk, u32 dclk);
 	int (*set_vce_clocks)(struct amdgpu_device *adev, u32 evclk, u32 ecclk);
+	/* query virtual capabilities */
+	u32 (*get_virtual_caps)(struct amdgpu_device *adev);
 };
 
 /*
@@ -1914,8 +1931,12 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device);
 /*
  * GPU virtualization
  */
+#define AMDGPU_VIRT_CAPS_SRIOV_EN	(1 << 0)
+#define AMDGPU_VIRT_CAPS_IS_VF		(1 << 1)
 struct amdgpu_virtualization {
 	bool supports_sr_iov;
+	bool is_virtual;
+	u32 caps;
 };
 
 /*
@@ -1997,6 +2018,10 @@ struct amdgpu_device {
 	spinlock_t didt_idx_lock;
 	amdgpu_rreg_t didt_rreg;
 	amdgpu_wreg_t didt_wreg;
+	/* protects concurrent gc_cac register access */
+	spinlock_t gc_cac_idx_lock;
+	amdgpu_rreg_t gc_cac_rreg;
+	amdgpu_wreg_t gc_cac_wreg;
 	/* protects concurrent ENDPOINT (audio) register access */
 	spinlock_t audio_endpt_idx_lock;
 	amdgpu_block_rreg_t audio_endpt_rreg;
@@ -2022,6 +2047,7 @@ struct amdgpu_device {
 	atomic64_t vram_vis_usage;
 	atomic64_t gtt_usage;
 	atomic64_t num_bytes_moved;
+	atomic64_t num_evictions;
 	atomic_t gpu_reset_counter;
 
 	/* display */
@@ -2125,6 +2151,8 @@ void amdgpu_mm_wdoorbell(struct amdgpu_device *adev, u32 index, u32 v);
 #define WREG32_UVD_CTX(reg, v) adev->uvd_ctx_wreg(adev, (reg), (v))
 #define RREG32_DIDT(reg) adev->didt_rreg(adev, (reg))
 #define WREG32_DIDT(reg, v) adev->didt_wreg(adev, (reg), (v))
+#define RREG32_GC_CAC(reg) adev->gc_cac_rreg(adev, (reg))
+#define WREG32_GC_CAC(reg, v) adev->gc_cac_wreg(adev, (reg), (v))
 #define RREG32_AUDIO_ENDPT(block, reg) adev->audio_endpt_rreg(adev, (block), (reg))
 #define WREG32_AUDIO_ENDPT(block, reg, v) adev->audio_endpt_wreg(adev, (block), (reg), (v))
 #define WREG32_P(reg, val, mask)				\
@@ -2200,11 +2228,10 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
  */
 #define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state))
 #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev))
-#define amdgpu_asic_wait_for_mc_idle(adev) (adev)->asic_funcs->wait_for_mc_idle((adev))
 #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev))
 #define amdgpu_asic_set_uvd_clocks(adev, v, d) (adev)->asic_funcs->set_uvd_clocks((adev), (v), (d))
 #define amdgpu_asic_set_vce_clocks(adev, ev, ec) (adev)->asic_funcs->set_vce_clocks((adev), (ev), (ec))
-#define amdgpu_asic_get_gpu_clock_counter(adev) (adev)->asic_funcs->get_gpu_clock_counter((adev))
+#define amdgpu_asic_get_virtual_caps(adev) ((adev)->asic_funcs->get_virtual_caps((adev)))
 #define amdgpu_asic_read_disabled_bios(adev) (adev)->asic_funcs->read_disabled_bios((adev))
 #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l))
 #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v)))
@@ -2257,6 +2284,8 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_dpm_print_power_state(adev, ps) (adev)->pm.funcs->print_power_state((adev), (ps))
 #define amdgpu_dpm_vblank_too_short(adev) (adev)->pm.funcs->vblank_too_short((adev))
 #define amdgpu_dpm_enable_bapm(adev, e) (adev)->pm.funcs->enable_bapm((adev), (e))
+#define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev))
+#define amdgpu_gfx_select_se_sh(adev, se, sh, instance) (adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance))
 
 #define amdgpu_dpm_get_temperature(adev) \
 	((adev)->pp_enabled ?						\
@@ -2335,6 +2364,18 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
 #define amdgpu_dpm_force_clock_level(adev, type, level) \
 		(adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)
 
+#define amdgpu_dpm_get_sclk_od(adev) \
+	(adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)
+
+#define amdgpu_dpm_set_sclk_od(adev, value) \
+	(adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)
+
+#define amdgpu_dpm_get_mclk_od(adev) \
+	((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_set_mclk_od(adev, value) \
+	((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
+
 #define amdgpu_dpm_dispatch_task(adev, event_id, input, output) \
 	(adev)->powerplay.pp_funcs->dispatch_tasks((adev)->powerplay.pp_handle, (event_id), (input), (output))
 
@@ -2376,9 +2417,13 @@ bool amdgpu_device_is_px(struct drm_device *dev);
 #if defined(CONFIG_VGA_SWITCHEROO)
 void amdgpu_register_atpx_handler(void);
 void amdgpu_unregister_atpx_handler(void);
+bool amdgpu_has_atpx_dgpu_power_cntl(void);
+bool amdgpu_is_atpx_hybrid(void);
 #else
 static inline void amdgpu_register_atpx_handler(void) {}
 static inline void amdgpu_unregister_atpx_handler(void) {}
+static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
+static inline bool amdgpu_is_atpx_hybrid(void) { return false; }
 #endif
 
 /*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 32809f749903..d080d0807a5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -240,8 +240,8 @@ uint64_t get_gpu_clock_counter(struct kgd_dev *kgd)
 {
 	struct amdgpu_device *rdev = (struct amdgpu_device *)kgd;
 
-	if (rdev->asic_funcs->get_gpu_clock_counter)
-		return rdev->asic_funcs->get_gpu_clock_counter(rdev);
+	if (rdev->gfx.funcs->get_gpu_clock_counter)
+		return rdev->gfx.funcs->get_gpu_clock_counter(rdev);
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 35a1248aaa77..0494fe7b62c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -10,6 +10,7 @@
 #include <linux/slab.h>
 #include <linux/acpi.h>
 #include <linux/pci.h>
+#include <linux/delay.h>
 
 #include "amd_acpi.h"
 
@@ -27,6 +28,7 @@ struct amdgpu_atpx_functions {
 struct amdgpu_atpx {
 	acpi_handle handle;
 	struct amdgpu_atpx_functions functions;
+	bool is_hybrid;
 };
 
 static struct amdgpu_atpx_priv {
@@ -63,6 +65,14 @@ bool amdgpu_has_atpx(void) {
 	return amdgpu_atpx_priv.atpx_detected;
 }
 
+bool amdgpu_has_atpx_dgpu_power_cntl(void) {
+	return amdgpu_atpx_priv.atpx.functions.power_cntl;
+}
+
+bool amdgpu_is_atpx_hybrid(void) {
+	return amdgpu_atpx_priv.atpx.is_hybrid;
+}
+
 /**
  * amdgpu_atpx_call - call an ATPX method
  *
@@ -142,18 +152,12 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
  */
 static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 {
-	/* make sure required functions are enabled */
-	/* dGPU power control is required */
-	if (atpx->functions.power_cntl == false) {
-		printk("ATPX dGPU power cntl not present, forcing\n");
-		atpx->functions.power_cntl = true;
-	}
+	u32 valid_bits = 0;
 
 	if (atpx->functions.px_params) {
 		union acpi_object *info;
 		struct atpx_px_params output;
 		size_t size;
-		u32 valid_bits;
 
 		info = amdgpu_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL);
 		if (!info)
@@ -172,19 +176,43 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
 		memcpy(&output, info->buffer.pointer, size);
 
 		valid_bits = output.flags & output.valid_flags;
-		/* if separate mux flag is set, mux controls are required */
-		if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
-			atpx->functions.i2c_mux_cntl = true;
-			atpx->functions.disp_mux_cntl = true;
-		}
-		/* if any outputs are muxed, mux controls are required */
-		if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
-				  ATPX_TV_SIGNAL_MUXED |
-				  ATPX_DFP_SIGNAL_MUXED))
-			atpx->functions.disp_mux_cntl = true;
 
 		kfree(info);
 	}
+
+	/* if separate mux flag is set, mux controls are required */
+	if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) {
+		atpx->functions.i2c_mux_cntl = true;
+		atpx->functions.disp_mux_cntl = true;
+	}
+
+	/* if any outputs are muxed, mux controls are required */
+	if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED |
+			  ATPX_TV_SIGNAL_MUXED |
+			  ATPX_DFP_SIGNAL_MUXED))
+		atpx->functions.disp_mux_cntl = true;
+
+
+	/* some bioses set these bits rather than flagging power_cntl as supported */
+	if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED |
+			  ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED))
+		atpx->functions.power_cntl = true;
+
+	atpx->is_hybrid = false;
+	if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
+		printk("ATPX Hybrid Graphics\n");
+#if 1
+		/* This is a temporary hack until the D3 cold support
+		 * makes it upstream.  The ATPX power_control method seems
+		 * to still work on even if the system should be using
+		 * the new standardized hybrid D3 cold ACPI interface.
+		 */
+		atpx->functions.power_cntl = true;
+#else
+		atpx->functions.power_cntl = false;
+#endif
+		atpx->is_hybrid = true;
+	}
+
 	return 0;
 }
 
@@ -259,6 +287,10 @@ static int amdgpu_atpx_set_discrete_state(struct amdgpu_atpx *atpx, u8 state)
 		if (!info)
 			return -EIO;
 		kfree(info);
+
+		/* 200ms delay is required after off */
+		if (state == 0)
+			msleep(200);
 	}
 	return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 823bf5e0b0c8..651115dcce12 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -94,6 +94,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 	unsigned last_entry = 0, first_userptr = num_entries;
 	unsigned i;
 	int r;
+	unsigned long total_size = 0;
 
 	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
 	if (!array)
@@ -140,6 +141,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
 			oa_obj = entry->robj;
 
+		total_size += amdgpu_bo_size(entry->robj);
 		trace_amdgpu_bo_list_set(list, entry->robj);
 	}
 
@@ -155,6 +157,7 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
 	list->array = array;
 	list->num_entries = num_entries;
 
+	trace_amdgpu_cs_bo_status(list->num_entries, total_size);
 	return 0;
 
 error_free:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 199f76baf22c..5556ce979199 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -312,6 +312,8 @@ static uint32_t amdgpu_cgs_read_ind_register(struct cgs_device *cgs_device,
 		return RREG32_UVD_CTX(index);
 	case CGS_IND_REG__DIDT:
 		return RREG32_DIDT(index);
+	case CGS_IND_REG_GC_CAC:
+		return RREG32_GC_CAC(index);
 	case CGS_IND_REG__AUDIO_ENDPT:
 		DRM_ERROR("audio endpt register access not implemented.\n");
 		return 0;
@@ -336,6 +338,8 @@ static void amdgpu_cgs_write_ind_register(struct cgs_device *cgs_device,
 		return WREG32_UVD_CTX(index, value);
 	case CGS_IND_REG__DIDT:
 		return WREG32_DIDT(index, value);
+	case CGS_IND_REG_GC_CAC:
+		return WREG32_GC_CAC(index, value);
 	case CGS_IND_REG__AUDIO_ENDPT:
 		DRM_ERROR("audio endpt register access not implemented.\n");
 		return;
@@ -696,6 +700,17 @@ static uint32_t fw_type_convert(struct cgs_device *cgs_device, uint32_t fw_type)
 	return result;
 }
 
+static int amdgpu_cgs_rel_firmware(struct cgs_device *cgs_device, enum cgs_ucode_id type)
+{
+	CGS_FUNC_ADEV;
+	if ((CGS_UCODE_ID_SMU == type) || (CGS_UCODE_ID_SMU_SK == type)) {
+		release_firmware(adev->pm.fw);
+		return 0;
+	}
+	/* cannot release other firmware because they are not created by cgs */
+	return -EINVAL;
+}
+
 static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 					enum cgs_ucode_id type,
 					struct cgs_firmware_info *info)
@@ -776,6 +791,7 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
 		}
 
 		hdr = (const struct smc_firmware_header_v1_0 *)	adev->pm.fw->data;
+		amdgpu_ucode_print_smc_hdr(&hdr->header);
 		adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);
 		ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes);
 		ucode_start_address = le32_to_cpu(hdr->ucode_start_addr);
@@ -819,6 +835,9 @@ static int amdgpu_cgs_query_system_info(struct cgs_device *cgs_device,
 	case CGS_SYSTEM_INFO_GFX_CU_INFO:
 		sys_info->value = adev->gfx.cu_info.number;
 		break;
+	case CGS_SYSTEM_INFO_GFX_SE_INFO:
+		sys_info->value = adev->gfx.config.max_shader_engines;
+		break;
 	default:
 		return -ENODEV;
 	}
@@ -898,7 +917,7 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
 	struct cgs_acpi_method_argument *argument = NULL;
 	uint32_t i, count;
 	acpi_status status;
-	int result;
+	int result = 0;
 	uint32_t func_no = 0xFFFFFFFF;
 
 	handle = ACPI_HANDLE(&adev->pdev->dev);
@@ -961,11 +980,11 @@ static int amdgpu_cgs_acpi_eval_object(struct cgs_device *cgs_device,
 				params->integer.value = argument->value;
 				break;
 			case ACPI_TYPE_STRING:
-				params->string.length = argument->method_length;
+				params->string.length = argument->data_length;
 				params->string.pointer = argument->pointer;
 				break;
 			case ACPI_TYPE_BUFFER:
-				params->buffer.length = argument->method_length;
+				params->buffer.length = argument->data_length;
 				params->buffer.pointer = argument->pointer;
 				break;
 			default:
@@ -1068,17 +1087,14 @@ int amdgpu_cgs_call_acpi_method(struct cgs_device *cgs_device,
 	struct cgs_acpi_method_info info = {0};
 
 	acpi_input[0].type = CGS_ACPI_TYPE_INTEGER;
-	acpi_input[0].method_length = sizeof(uint32_t);
 	acpi_input[0].data_length = sizeof(uint32_t);
 	acpi_input[0].value = acpi_function;
 
 	acpi_input[1].type = CGS_ACPI_TYPE_BUFFER;
-	acpi_input[1].method_length = CGS_ACPI_MAX_BUFFER_SIZE;
 	acpi_input[1].data_length = input_size;
 	acpi_input[1].pointer = pinput;
 
 	acpi_output.type = CGS_ACPI_TYPE_BUFFER;
-	acpi_output.method_length = CGS_ACPI_MAX_BUFFER_SIZE;
 	acpi_output.data_length = output_size;
 	acpi_output.pointer = poutput;
 
@@ -1125,6 +1141,7 @@ static const struct cgs_ops amdgpu_cgs_ops = {
 	amdgpu_cgs_pm_query_clock_limits,
 	amdgpu_cgs_set_camera_voltages,
 	amdgpu_cgs_get_firmware_info,
+	amdgpu_cgs_rel_firmware,
 	amdgpu_cgs_set_powergating_state,
 	amdgpu_cgs_set_clockgating_state,
 	amdgpu_cgs_get_active_displays_info,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index cb07da41152b..ff0b55a65ca3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1690,7 +1690,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 						   DRM_MODE_SCALE_NONE);
 		/* no HPD on analog connectors */
 		amdgpu_connector->hpd.hpd = AMDGPU_HPD_NONE;
-		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 		connector->interlace_allowed = true;
 		connector->doublescan_allowed = true;
 		break;
@@ -1893,8 +1892,10 @@ amdgpu_connector_add(struct amdgpu_device *adev,
 	}
 
 	if (amdgpu_connector->hpd.hpd == AMDGPU_HPD_NONE) {
-		if (i2c_bus->valid)
-			connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+		if (i2c_bus->valid) {
+			connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+					    DRM_CONNECTOR_POLL_DISCONNECT;
+		}
 	} else
 		connector->polled = DRM_CONNECTOR_POLL_HPD;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9bc8f1d99733..0307ff5887c5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -216,11 +216,8 @@ int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 	if (ret)
 		goto free_all_kdata;
 
-	if (p->uf_entry.robj) {
-		p->job->uf_bo = amdgpu_bo_ref(p->uf_entry.robj);
-		p->job->uf_offset = uf_offset;
-	}
-
+	if (p->uf_entry.robj)
+		p->job->uf_addr = uf_offset;
 	kfree(chunk_array);
 	return 0;
 
@@ -459,7 +456,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		list_splice(&need_pages, &p->validated);
 	}
 
-	amdgpu_vm_get_pt_bos(&fpriv->vm, &duplicates);
+	amdgpu_vm_get_pt_bos(p->adev, &fpriv->vm, &duplicates);
 
 	p->bytes_moved_threshold = amdgpu_cs_get_threshold_for_moves(p->adev);
 	p->bytes_moved = 0;
@@ -472,6 +469,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 	if (r)
 		goto error_validate;
 
+	fpriv->vm.last_eviction_counter =
+		atomic64_read(&p->adev->num_evictions);
+
 	if (p->bo_list) {
 		struct amdgpu_bo *gds = p->bo_list->gds_obj;
 		struct amdgpu_bo *gws = p->bo_list->gws_obj;
@@ -499,6 +499,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
 		}
 	}
 
+	if (p->uf_entry.robj)
+		p->job->uf_addr += amdgpu_bo_gpu_offset(p->uf_entry.robj);
+
 error_validate:
 	if (r) {
 		amdgpu_vm_move_pt_bos_in_lru(p->adev, &fpriv->vm);
@@ -653,18 +656,21 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
 
 	/* Only for UVD/VCE VM emulation */
 	if (ring->funcs->parse_cs) {
+		p->job->vm = NULL;
 		for (i = 0; i < p->job->num_ibs; i++) {
 			r = amdgpu_ring_parse_cs(ring, p, i);
 			if (r)
 				return r;
 		}
-	}
+	} else {
+		p->job->vm_pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 
-	r = amdgpu_bo_vm_update_pte(p, vm);
-	if (!r)
-		amdgpu_cs_sync_rings(p);
+		r = amdgpu_bo_vm_update_pte(p, vm);
+		if (r)
+			return r;
+	}
 
-	return r;
+	return amdgpu_cs_sync_rings(p);
 }
 
 static int amdgpu_cs_handle_lockup(struct amdgpu_device *adev, int r)
@@ -761,7 +767,7 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
 	}
 
 	/* UVD & VCE fw doesn't support user fences */
-	if (parser->job->uf_bo && (
+	if (parser->job->uf_addr && (
 	    parser->job->ring->type == AMDGPU_RING_TYPE_UVD ||
 	    parser->job->ring->type == AMDGPU_RING_TYPE_VCE))
 		return -EINVAL;
@@ -830,17 +836,13 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 {
 	struct amdgpu_ring *ring = p->job->ring;
 	struct amd_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
-	struct fence *fence;
 	struct amdgpu_job *job;
 	int r;
 
 	job = p->job;
 	p->job = NULL;
 
-	r = amd_sched_job_init(&job->base, &ring->sched,
-			       entity, amdgpu_job_timeout_func,
-			       amdgpu_job_free_func,
-			       p->filp, &fence);
+	r = amd_sched_job_init(&job->base, &ring->sched, entity, p->filp);
 	if (r) {
 		amdgpu_job_free(job);
 		return r;
@@ -848,9 +850,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 
 	job->owner = p->filp;
 	job->ctx = entity->fence_context;
-	p->fence = fence_get(fence);
-	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, fence);
+	p->fence = fence_get(&job->base.s_fence->finished);
+	cs->out.handle = amdgpu_ctx_add_fence(p->ctx, ring, p->fence);
 	job->uf_sequence = cs->out.handle;
+	amdgpu_job_free_resources(job);
 
 	trace_amdgpu_cs_ioctl(job);
 	amd_sched_entity_push_job(&job->base);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index bb8b149786d7..614fb026436d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -25,6 +25,7 @@
  *          Alex Deucher
  *          Jerome Glisse
  */
+#include <linux/kthread.h>
 #include <linux/console.h>
 #include <linux/slab.h>
 #include <linux/debugfs.h>
@@ -35,6 +36,7 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/efi.h>
 #include "amdgpu.h"
+#include "amdgpu_trace.h"
 #include "amdgpu_i2c.h"
 #include "atom.h"
 #include "amdgpu_atombios.h"
@@ -79,24 +81,27 @@ bool amdgpu_device_is_px(struct drm_device *dev)
 uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
 			bool always_indirect)
 {
+	uint32_t ret;
+
 	if ((reg * 4) < adev->rmmio_size && !always_indirect)
-		return readl(((void __iomem *)adev->rmmio) + (reg * 4));
+		ret = readl(((void __iomem *)adev->rmmio) + (reg * 4));
 	else {
 		unsigned long flags;
-		uint32_t ret;
 
 		spin_lock_irqsave(&adev->mmio_idx_lock, flags);
 		writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
 		ret = readl(((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
 		spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
-		return ret;
 	}
+	trace_amdgpu_mm_rreg(adev->pdev->device, reg, ret);
+	return ret;
 }
 
 void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
 		    bool always_indirect)
 {
+	trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
 	if ((reg * 4) < adev->rmmio_size && !always_indirect)
 		writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
 	else {
@@ -827,8 +832,10 @@ static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
  */
 static void amdgpu_atombios_fini(struct amdgpu_device *adev)
 {
-	if (adev->mode_info.atom_context)
+	if (adev->mode_info.atom_context) {
 		kfree(adev->mode_info.atom_context->scratch);
+		kfree(adev->mode_info.atom_context->iio);
+	}
 	kfree(adev->mode_info.atom_context);
 	adev->mode_info.atom_context = NULL;
 	kfree(adev->mode_info.atom_card_info);
@@ -1068,11 +1075,14 @@ int amdgpu_set_clockgating_state(struct amdgpu_device *adev,
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
 		if (adev->ip_blocks[i].type == block_type) {
 			r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
 									    state);
 			if (r)
 				return r;
+			break;
 		}
 	}
 	return r;
@@ -1085,16 +1095,53 @@ int amdgpu_set_powergating_state(struct amdgpu_device *adev,
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
 		if (adev->ip_blocks[i].type == block_type) {
 			r = adev->ip_blocks[i].funcs->set_powergating_state((void *)adev,
 									    state);
 			if (r)
 				return r;
+			break;
 		}
 	}
 	return r;
 }
 
+int amdgpu_wait_for_idle(struct amdgpu_device *adev,
+			 enum amd_ip_block_type block_type)
+{
+	int i, r;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_blocks[i].type == block_type) {
+			r = adev->ip_blocks[i].funcs->wait_for_idle((void *)adev);
+			if (r)
+				return r;
+			break;
+		}
+	}
+	return 0;
+
+}
+
+bool amdgpu_is_idle(struct amdgpu_device *adev,
+		    enum amd_ip_block_type block_type)
+{
+	int i;
+
+	for (i = 0; i < adev->num_ip_blocks; i++) {
+		if (!adev->ip_block_status[i].valid)
+			continue;
+		if (adev->ip_blocks[i].type == block_type)
+			return adev->ip_blocks[i].funcs->is_idle((void *)adev);
+	}
+	return true;
+
+}
+
 const struct amdgpu_ip_block_version * amdgpu_get_ip_block(
 					struct amdgpu_device *adev,
 					enum amd_ip_block_type type)
@@ -1207,6 +1254,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
 		}
 	}
 
+	adev->cg_flags &= amdgpu_cg_mask;
+	adev->pg_flags &= amdgpu_pg_mask;
+
 	return 0;
 }
 
@@ -1325,6 +1375,11 @@ static int amdgpu_fini(struct amdgpu_device *adev)
 		adev->ip_block_status[i].valid = false;
 	}
 
+	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+		if (adev->ip_blocks[i].funcs->late_fini)
+			adev->ip_blocks[i].funcs->late_fini((void *)adev);
+	}
+
 	return 0;
 }
 
@@ -1378,6 +1433,15 @@ static int amdgpu_resume(struct amdgpu_device *adev)
 	return 0;
 }
 
+static bool amdgpu_device_is_virtual(void)
+{
+#ifdef CONFIG_X86
+	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
+#else
+	return false;
+#endif
+}
+
 /**
  * amdgpu_device_init - initialize the driver
  *
@@ -1424,9 +1488,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->uvd_ctx_wreg = &amdgpu_invalid_wreg;
 	adev->didt_rreg = &amdgpu_invalid_rreg;
 	adev->didt_wreg = &amdgpu_invalid_wreg;
+	adev->gc_cac_rreg = &amdgpu_invalid_rreg;
+	adev->gc_cac_wreg = &amdgpu_invalid_wreg;
 	adev->audio_endpt_rreg = &amdgpu_block_invalid_rreg;
 	adev->audio_endpt_wreg = &amdgpu_block_invalid_wreg;
 
+	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
+		 amdgpu_asic_name[adev->asic_type], pdev->vendor, pdev->device,
+		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
@@ -1451,6 +1518,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	spin_lock_init(&adev->pcie_idx_lock);
 	spin_lock_init(&adev->uvd_ctx_idx_lock);
 	spin_lock_init(&adev->didt_idx_lock);
+	spin_lock_init(&adev->gc_cac_idx_lock);
 	spin_lock_init(&adev->audio_endpt_idx_lock);
 
 	adev->rmmio_base = pci_resource_start(adev->pdev, 5);
@@ -1495,29 +1563,38 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	vga_switcheroo_init_domain_pm_ops(adev->dev, &adev->vga_pm_domain);
 
 	/* Read BIOS */
-	if (!amdgpu_get_bios(adev))
-		return -EINVAL;
+	if (!amdgpu_get_bios(adev)) {
+		r = -EINVAL;
+		goto failed;
+	}
 	/* Must be an ATOMBIOS */
 	if (!adev->is_atom_bios) {
 		dev_err(adev->dev, "Expecting atombios for GPU\n");
-		return -EINVAL;
+		r = -EINVAL;
+		goto failed;
 	}
 	r = amdgpu_atombios_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_atombios_init failed\n");
-		return r;
+		goto failed;
 	}
 
 	/* See if the asic supports SR-IOV */
 	adev->virtualization.supports_sr_iov =
 		amdgpu_atombios_has_gpu_virtualization_table(adev);
 
+	/* Check if we are executing in a virtualized environment */
+	adev->virtualization.is_virtual = amdgpu_device_is_virtual();
+	adev->virtualization.caps = amdgpu_asic_get_virtual_caps(adev);
+
 	/* Post card if necessary */
 	if (!amdgpu_card_posted(adev) ||
-	    adev->virtualization.supports_sr_iov) {
+	    (adev->virtualization.is_virtual &&
+	     !(adev->virtualization.caps & AMDGPU_VIRT_CAPS_SRIOV_EN))) {
 		if (!adev->bios) {
 			dev_err(adev->dev, "Card not posted and no BIOS - ignoring\n");
-			return -EINVAL;
+			r = -EINVAL;
+			goto failed;
 		}
 		DRM_INFO("GPU not posted. posting now...\n");
 		amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1527,7 +1604,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_atombios_get_clock_info(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_atombios_get_clock_info failed\n");
-		return r;
+		goto failed;
 	}
 	/* init i2c buses */
 	amdgpu_atombios_i2c_init(adev);
@@ -1536,7 +1613,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_fence_driver_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_fence_driver_init failed\n");
-		return r;
+		goto failed;
 	}
 
 	/* init the mode config */
@@ -1546,7 +1623,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	if (r) {
 		dev_err(adev->dev, "amdgpu_init failed\n");
 		amdgpu_fini(adev);
-		return r;
+		goto failed;
 	}
 
 	adev->accel_working = true;
@@ -1556,7 +1633,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_ib_pool_init(adev);
 	if (r) {
 		dev_err(adev->dev, "IB initialization failed (%d).\n", r);
-		return r;
+		goto failed;
 	}
 
 	r = amdgpu_ib_ring_tests(adev);
@@ -1573,6 +1650,12 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 		DRM_ERROR("registering register debugfs failed (%d).\n", r);
 	}
 
+	r = amdgpu_debugfs_firmware_init(adev);
+	if (r) {
+		DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+		return r;
+	}
+
 	if ((amdgpu_testing & 1)) {
 		if (adev->accel_working)
 			amdgpu_test_moves(adev);
@@ -1598,10 +1681,15 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	r = amdgpu_late_init(adev);
 	if (r) {
 		dev_err(adev->dev, "amdgpu_late_init failed\n");
-		return r;
+		goto failed;
 	}
 
 	return 0;
+
+failed:
+	if (runtime)
+		vga_switcheroo_fini_domain_pm_ops(adev->dev);
+	return r;
 }
 
 static void amdgpu_debugfs_remove_files(struct amdgpu_device *adev);
@@ -1624,6 +1712,7 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	amdgpu_bo_evict_vram(adev);
 	amdgpu_ib_pool_fini(adev);
 	amdgpu_fence_driver_fini(adev);
+	drm_crtc_force_disable_all(adev->ddev);
 	amdgpu_fbdev_fini(adev);
 	r = amdgpu_fini(adev);
 	kfree(adev->ip_block_status);
@@ -1635,6 +1724,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
 	kfree(adev->bios);
 	adev->bios = NULL;
 	vga_switcheroo_unregister_client(adev->pdev);
+	if (adev->flags & AMD_IS_PX)
+		vga_switcheroo_fini_domain_pm_ops(adev->dev);
 	vga_client_register(adev->pdev, NULL, NULL, NULL);
 	if (adev->rio_mem)
 		pci_iounmap(adev->pdev, adev->rio_mem);
@@ -1840,11 +1931,6 @@ int amdgpu_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
  */
 int amdgpu_gpu_reset(struct amdgpu_device *adev)
 {
-	unsigned ring_sizes[AMDGPU_MAX_RINGS];
-	uint32_t *ring_data[AMDGPU_MAX_RINGS];
-
-	bool saved = false;
-
 	int i, r;
 	int resched;
 
@@ -1853,22 +1939,30 @@ int amdgpu_gpu_reset(struct amdgpu_device *adev)
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
-	r = amdgpu_suspend(adev);
-
+	/* block scheduler */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
+
 		if (!ring)
 			continue;
-
-		ring_sizes[i] = amdgpu_ring_backup(ring, &ring_data[i]);
-		if (ring_sizes[i]) {
-			saved = true;
-			dev_info(adev->dev, "Saved %d dwords of commands "
-				 "on ring %d.\n", ring_sizes[i], i);
-		}
+		kthread_park(ring->sched.thread);
+		amd_sched_hw_job_reset(&ring->sched);
 	}
+	/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
+	amdgpu_fence_driver_force_completion(adev);
+
+	/* save scratch */
+	amdgpu_atombios_scratch_regs_save(adev);
+	r = amdgpu_suspend(adev);
 
 retry:
+	/* Disable fb access */
+	if (adev->mode_info.num_crtc) {
+		struct amdgpu_mode_mc_save save;
+		amdgpu_display_stop_mc_access(adev, &save);
+		amdgpu_wait_for_idle(adev, AMD_IP_BLOCK_TYPE_GMC);
+	}
+
 	r = amdgpu_asic_reset(adev);
 	/* post card */
 	amdgpu_atom_asic_init(adev->mode_info.atom_context);
@@ -1877,32 +1971,29 @@ retry:
 		dev_info(adev->dev, "GPU reset succeeded, trying to resume\n");
 		r = amdgpu_resume(adev);
 	}
-
+	/* restore scratch */
+	amdgpu_atombios_scratch_regs_restore(adev);
 	if (!r) {
+		r = amdgpu_ib_ring_tests(adev);
+		if (r) {
+			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
+			r = amdgpu_suspend(adev);
+			goto retry;
+		}
+
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = adev->rings[i];
 			if (!ring)
 				continue;
-
-			amdgpu_ring_restore(ring, ring_sizes[i], ring_data[i]);
-			ring_sizes[i] = 0;
-			ring_data[i] = NULL;
-		}
-
-		r = amdgpu_ib_ring_tests(adev);
-		if (r) {
-			dev_err(adev->dev, "ib ring test failed (%d).\n", r);
-			if (saved) {
-				saved = false;
-				r = amdgpu_suspend(adev);
-				goto retry;
-			}
+			amd_sched_job_recovery(&ring->sched);
+			kthread_unpark(ring->sched.thread);
 		}
 	} else {
-		amdgpu_fence_driver_force_completion(adev);
+		dev_err(adev->dev, "asic resume failed (%d).\n", r);
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-			if (adev->rings[i])
-				kfree(ring_data[i]);
+			if (adev->rings[i]) {
+				kthread_unpark(adev->rings[i]->sched.thread);
+			}
 		}
 	}
 
@@ -1913,13 +2004,11 @@ retry:
 		/* bad news, how to tell it to userspace ? */
 		dev_info(adev->dev, "GPU reset failed\n");
 	}
+	amdgpu_irq_gpu_reset_resume_helper(adev);
 
 	return r;
 }
 
-#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007  /* gen: chipset 1/2, asic 1/2/3 */
-#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
-
 void amdgpu_get_pcie_info(struct amdgpu_device *adev)
 {
 	u32 mask;
@@ -2073,20 +2162,43 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 	struct amdgpu_device *adev = f->f_inode->i_private;
 	ssize_t result = 0;
 	int r;
+	bool use_bank;
+	unsigned instance_bank, sh_bank, se_bank;
 
 	if (size & 0x3 || *pos & 0x3)
 		return -EINVAL;
 
+	if (*pos & (1ULL << 62)) {
+		se_bank = (*pos >> 24) & 0x3FF;
+		sh_bank = (*pos >> 34) & 0x3FF;
+		instance_bank = (*pos >> 44) & 0x3FF;
+		use_bank = 1;
+		*pos &= 0xFFFFFF;
+	} else {
+		use_bank = 0;
+	}
+
+	if (use_bank) {
+		if (sh_bank >= adev->gfx.config.max_sh_per_se ||
+		    se_bank >= adev->gfx.config.max_shader_engines)
+			return -EINVAL;
+		mutex_lock(&adev->grbm_idx_mutex);
+		amdgpu_gfx_select_se_sh(adev, se_bank,
+					sh_bank, instance_bank);
+	}
+
 	while (size) {
 		uint32_t value;
 
 		if (*pos > adev->rmmio_size)
-			return result;
+			goto end;
 
 		value = RREG32(*pos >> 2);
 		r = put_user(value, (uint32_t *)buf);
-		if (r)
-			return r;
+		if (r) {
+			result = r;
+			goto end;
+		}
 
 		result += 4;
 		buf += 4;
@@ -2094,6 +2206,12 @@ static ssize_t amdgpu_debugfs_regs_read(struct file *f, char __user *buf,
 		size -= 4;
 	}
 
+end:
+	if (use_bank) {
+		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+	}
+
 	return result;
 }
 
@@ -2293,6 +2411,68 @@ static ssize_t amdgpu_debugfs_regs_smc_write(struct file *f, const char __user *
 	return result;
 }
 
+static ssize_t amdgpu_debugfs_gca_config_read(struct file *f, char __user *buf,
+					size_t size, loff_t *pos)
+{
+	struct amdgpu_device *adev = f->f_inode->i_private;
+	ssize_t result = 0;
+	int r;
+	uint32_t *config, no_regs = 0;
+
+	if (size & 0x3 || *pos & 0x3)
+		return -EINVAL;
+
+	config = kmalloc(256 * sizeof(*config), GFP_KERNEL);
+	if (!config)
+		return -ENOMEM;
+
+	/* version, increment each time something is added */
+	config[no_regs++] = 0;
+	config[no_regs++] = adev->gfx.config.max_shader_engines;
+	config[no_regs++] = adev->gfx.config.max_tile_pipes;
+	config[no_regs++] = adev->gfx.config.max_cu_per_sh;
+	config[no_regs++] = adev->gfx.config.max_sh_per_se;
+	config[no_regs++] = adev->gfx.config.max_backends_per_se;
+	config[no_regs++] = adev->gfx.config.max_texture_channel_caches;
+	config[no_regs++] = adev->gfx.config.max_gprs;
+	config[no_regs++] = adev->gfx.config.max_gs_threads;
+	config[no_regs++] = adev->gfx.config.max_hw_contexts;
+	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_frontend;
+	config[no_regs++] = adev->gfx.config.sc_prim_fifo_size_backend;
+	config[no_regs++] = adev->gfx.config.sc_hiz_tile_fifo_size;
+	config[no_regs++] = adev->gfx.config.sc_earlyz_tile_fifo_size;
+	config[no_regs++] = adev->gfx.config.num_tile_pipes;
+	config[no_regs++] = adev->gfx.config.backend_enable_mask;
+	config[no_regs++] = adev->gfx.config.mem_max_burst_length_bytes;
+	config[no_regs++] = adev->gfx.config.mem_row_size_in_kb;
+	config[no_regs++] = adev->gfx.config.shader_engine_tile_size;
+	config[no_regs++] = adev->gfx.config.num_gpus;
+	config[no_regs++] = adev->gfx.config.multi_gpu_tile_size;
+	config[no_regs++] = adev->gfx.config.mc_arb_ramcfg;
+	config[no_regs++] = adev->gfx.config.gb_addr_config;
+	config[no_regs++] = adev->gfx.config.num_rbs;
+
+	while (size && (*pos < no_regs * 4)) {
+		uint32_t value;
+
+		value = config[*pos >> 2];
+		r = put_user(value, (uint32_t *)buf);
+		if (r) {
+			kfree(config);
+			return r;
+		}
+
+		result += 4;
+		buf += 4;
+		*pos += 4;
+		size -= 4;
+	}
+
+	kfree(config);
+	return result;
+}
+
+
 static const struct file_operations amdgpu_debugfs_regs_fops = {
 	.owner = THIS_MODULE,
 	.read = amdgpu_debugfs_regs_read,
@@ -2318,11 +2498,18 @@ static const struct file_operations amdgpu_debugfs_regs_smc_fops = {
 	.llseek = default_llseek
 };
 
+static const struct file_operations amdgpu_debugfs_gca_config_fops = {
+	.owner = THIS_MODULE,
+	.read = amdgpu_debugfs_gca_config_read,
+	.llseek = default_llseek
+};
+
 static const struct file_operations *debugfs_regs[] = {
 	&amdgpu_debugfs_regs_fops,
 	&amdgpu_debugfs_regs_didt_fops,
 	&amdgpu_debugfs_regs_pcie_fops,
 	&amdgpu_debugfs_regs_smc_fops,
+	&amdgpu_debugfs_gca_config_fops,
 };
 
 static const char *debugfs_regs_names[] = {
@@ -2330,6 +2517,7 @@ static const char *debugfs_regs_names[] = {
 	"amdgpu_regs_didt",
 	"amdgpu_regs_pcie",
 	"amdgpu_regs_smc",
+	"amdgpu_gca_config",
 };
 
 static int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index a6eecf6f9065..7dbe8d02c5a6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -220,19 +220,17 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 
 	r = amdgpu_bo_pin_restricted(new_rbo, AMDGPU_GEM_DOMAIN_VRAM, 0, 0, &base);
 	if (unlikely(r != 0)) {
-		amdgpu_bo_unreserve(new_rbo);
 		r = -EINVAL;
 		DRM_ERROR("failed to pin new rbo buffer before flip\n");
-		goto cleanup;
+		goto unreserve;
 	}
 
 	r = reservation_object_get_fences_rcu(new_rbo->tbo.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
 	if (unlikely(r != 0)) {
-		amdgpu_bo_unreserve(new_rbo);
 		DRM_ERROR("failed to get fences for buffer\n");
-		goto cleanup;
+		goto unpin;
 	}
 
 	amdgpu_bo_get_tiling_flags(new_rbo, &tiling_flags);
@@ -275,9 +273,11 @@ pflip_cleanup:
 		DRM_ERROR("failed to reserve new rbo in error path\n");
 		goto cleanup;
 	}
+unpin:
 	if (unlikely(amdgpu_bo_unpin(new_rbo) != 0)) {
 		DRM_ERROR("failed to unpin new rbo in error path\n");
 	}
+unreserve:
 	amdgpu_bo_unreserve(new_rbo);
 
 cleanup:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f888c015f76c..015f1f4aae53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -82,8 +82,12 @@ int amdgpu_exp_hw_support = 0;
 int amdgpu_sched_jobs = 32;
 int amdgpu_sched_hw_submission = 2;
 int amdgpu_powerplay = -1;
+int amdgpu_powercontainment = 1;
 unsigned amdgpu_pcie_gen_cap = 0;
 unsigned amdgpu_pcie_lane_cap = 0;
+unsigned amdgpu_cg_mask = 0xffffffff;
+unsigned amdgpu_pg_mask = 0xffffffff;
+char *amdgpu_disable_cu = NULL;
 
 MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
 module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -160,6 +164,9 @@ module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
 #ifdef CONFIG_DRM_AMD_POWERPLAY
 MODULE_PARM_DESC(powerplay, "Powerplay component (1 = enable, 0 = disable, -1 = auto (default))");
 module_param_named(powerplay, amdgpu_powerplay, int, 0444);
+
+MODULE_PARM_DESC(powercontainment, "Power Containment (1 = enable (default), 0 = disable)");
+module_param_named(powercontainment, amdgpu_powercontainment, int, 0444);
 #endif
 
 MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
@@ -168,6 +175,15 @@ module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
 MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
 module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
 
+MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
+module_param_named(cg_mask, amdgpu_cg_mask, uint, 0444);
+
+MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
+module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
+
+MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
+module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
+
 static const struct pci_device_id pciidlist[] = {
 #ifdef CONFIG_DRM_AMDGPU_CIK
 	/* Kaveri */
@@ -413,7 +429,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
 	pci_save_state(pdev);
 	pci_disable_device(pdev);
 	pci_ignore_hotplug(pdev);
-	pci_set_power_state(pdev, PCI_D3cold);
+	if (amdgpu_is_atpx_hybrid())
+		pci_set_power_state(pdev, PCI_D3cold);
+	else if (!amdgpu_has_atpx_dgpu_power_cntl())
+		pci_set_power_state(pdev, PCI_D3hot);
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
 
 	return 0;
@@ -430,7 +449,9 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
 
 	drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
 
-	pci_set_power_state(pdev, PCI_D0);
+	if (amdgpu_is_atpx_hybrid() ||
+	    !amdgpu_has_atpx_dgpu_power_cntl())
+		pci_set_power_state(pdev, PCI_D0);
 	pci_restore_state(pdev);
 	ret = pci_enable_device(pdev);
 	if (ret)
@@ -515,7 +536,7 @@ static struct drm_driver kms_driver = {
 	.driver_features =
 	    DRIVER_USE_AGP |
 	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM |
-	    DRIVER_PRIME | DRIVER_RENDER,
+	    DRIVER_PRIME | DRIVER_RENDER | DRIVER_MODESET,
 	.dev_priv_size = 0,
 	.load = amdgpu_driver_load_kms,
 	.open = amdgpu_driver_open_kms,
@@ -590,7 +611,6 @@ static int __init amdgpu_init(void)
 	DRM_INFO("amdgpu kernel modesetting enabled.\n");
 	driver = &kms_driver;
 	pdriver = &amdgpu_kms_pci_driver;
-	driver->driver_features |= DRIVER_MODESET;
 	driver->num_ioctls = amdgpu_max_kms_ioctl;
 	amdgpu_register_atpx_handler();
 	/* let modprobe override vga console setting */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 8fab6486064f..88fbed2389c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -503,7 +503,7 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
 	if (r)
 		goto error_print;
 
-	amdgpu_vm_get_pt_bos(bo_va->vm, &duplicates);
+	amdgpu_vm_get_pt_bos(adev, bo_va->vm, &duplicates);
 	list_for_each_entry(entry, &list, head) {
 		domain = amdgpu_mem_type_to_domain(entry->bo->mem.mem_type);
 		/* if anything is swapped out don't swap it in here,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 9f95da4f0536..a074edd95c70 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -70,3 +70,47 @@ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
 		}
 	}
 }
+
+/**
+ * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
+ *
+ * @mask: array in which the per-shader array disable masks will be stored
+ * @max_se: number of SEs
+ * @max_sh: number of SHs
+ *
+ * The bitmask of CUs to be disabled in the shader array determined by se and
+ * sh is stored in mask[se * max_sh + sh].
+ */
+void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
+{
+	unsigned se, sh, cu;
+	const char *p;
+
+	memset(mask, 0, sizeof(*mask) * max_se * max_sh);
+
+	if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
+		return;
+
+	p = amdgpu_disable_cu;
+	for (;;) {
+		char *next;
+		int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);
+		if (ret < 3) {
+			DRM_ERROR("amdgpu: could not parse disable_cu\n");
+			return;
+		}
+
+		if (se < max_se && sh < max_sh && cu < 16) {
+			DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
+			mask[se * max_sh + sh] |= 1u << cu;
+		} else {
+			DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
+				  se, sh, cu);
+		}
+
+		next = strchr(p, ',');
+		if (!next)
+			break;
+		p = next + 1;
+	}
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index dc06cbda7be6..51321e154c09 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -27,4 +27,6 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
 
 void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);
 
+unsigned amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh);
+
 #endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 34e35423b78e..46c3097c5224 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -160,10 +160,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 		patch_offset = amdgpu_ring_init_cond_exec(ring);
 
 	if (vm) {
-		r = amdgpu_vm_flush(ring, job->vm_id, job->vm_pd_addr,
-				    job->gds_base, job->gds_size,
-				    job->gws_base, job->gws_size,
-				    job->oa_base, job->oa_size);
+		r = amdgpu_vm_flush(ring, job);
 		if (r) {
 			amdgpu_ring_undo(ring);
 			return r;
@@ -203,11 +200,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
 	}
 
 	/* wrap the last IB with fence */
-	if (job && job->uf_bo) {
-		uint64_t addr = amdgpu_bo_gpu_offset(job->uf_bo);
-
-		addr += job->uf_offset;
-		amdgpu_ring_emit_fence(ring, addr, job->uf_sequence,
+	if (job && job->uf_addr) {
+		amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
 				       AMDGPU_FENCE_FLAG_64BIT);
 	}
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
index 835a3fa8d8df..278708f5a744 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c
@@ -383,6 +383,18 @@ int amdgpu_irq_update(struct amdgpu_device *adev,
 	return r;
 }
 
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev)
+{
+	int i, j;
+	for (i = 0; i < AMDGPU_MAX_IRQ_SRC_ID; i++) {
+		struct amdgpu_irq_src *src = adev->irq.sources[i];
+		if (!src)
+			continue;
+		for (j = 0; j < src->num_types; j++)
+			amdgpu_irq_update(adev, src, j);
+	}
+}
+
 /**
  * amdgpu_irq_get - enable interrupt
  *
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
index e124b59f39c1..7ef09352e534 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.h
@@ -94,6 +94,7 @@ int amdgpu_irq_put(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
 		   unsigned type);
 bool amdgpu_irq_enabled(struct amdgpu_device *adev, struct amdgpu_irq_src *src,
 			unsigned type);
+void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev);
 
 int amdgpu_irq_add_domain(struct amdgpu_device *adev);
 void amdgpu_irq_remove_domain(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index f0dafa514fe4..aaee0c8f6731 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -28,21 +28,15 @@
 #include "amdgpu.h"
 #include "amdgpu_trace.h"
 
-static void amdgpu_job_free_handler(struct work_struct *ws)
+static void amdgpu_job_timedout(struct amd_sched_job *s_job)
 {
-	struct amdgpu_job *job = container_of(ws, struct amdgpu_job, base.work_free_job);
-	amd_sched_job_put(&job->base);
-}
+	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-void amdgpu_job_timeout_func(struct work_struct *work)
-{
-	struct amdgpu_job *job = container_of(work, struct amdgpu_job, base.work_tdr.work);
 	DRM_ERROR("ring %s timeout, last signaled seq=%u, last emitted seq=%u\n",
-		  job->base.sched->name,
-		  (uint32_t)atomic_read(&job->ring->fence_drv.last_seq),
-		  job->ring->fence_drv.sync_seq);
-
-	amd_sched_job_put(&job->base);
+		  job->base.sched->name,
+		  atomic_read(&job->ring->fence_drv.last_seq),
+		  job->ring->fence_drv.sync_seq);
+	amdgpu_gpu_reset(job->adev);
 }
 
 int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
@@ -63,7 +57,6 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
 	(*job)->vm = vm;
 	(*job)->ibs = (void *)&(*job)[1];
 	(*job)->num_ibs = num_ibs;
-	INIT_WORK(&(*job)->base.work_free_job, amdgpu_job_free_handler);
 
 	amdgpu_sync_create(&(*job)->sync);
 
@@ -86,27 +79,33 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 	return r;
 }
 
-void amdgpu_job_free(struct amdgpu_job *job)
+void amdgpu_job_free_resources(struct amdgpu_job *job)
 {
-	unsigned i;
 	struct fence *f;
+	unsigned i;
+
 	/* use sched fence if available */
-	f = (job->base.s_fence)? &job->base.s_fence->base : job->fence;
+	f = job->base.s_fence ? &job->base.s_fence->finished : job->fence;
 
 	for (i = 0; i < job->num_ibs; ++i)
-		amdgpu_sa_bo_free(job->adev, &job->ibs[i].sa_bo, f);
-	fence_put(job->fence);
+		amdgpu_ib_free(job->adev, &job->ibs[i], f);
+}
 
-	amdgpu_bo_unref(&job->uf_bo);
-	amdgpu_sync_free(&job->sync);
+void amdgpu_job_free_cb(struct amd_sched_job *s_job)
+{
+	struct amdgpu_job *job = container_of(s_job, struct amdgpu_job, base);
 
-	if (!job->base.use_sched)
-		kfree(job);
+	fence_put(job->fence);
+	amdgpu_sync_free(&job->sync);
+	kfree(job);
 }
 
-void amdgpu_job_free_func(struct kref *refcount)
+void amdgpu_job_free(struct amdgpu_job *job)
 {
-	struct amdgpu_job *job = container_of(refcount, struct amdgpu_job, base.refcount);
+	amdgpu_job_free_resources(job);
+
+	fence_put(job->fence);
+	amdgpu_sync_free(&job->sync);
 	kfree(job);
 }
 
@@ -114,22 +113,20 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct amd_sched_entity *entity, void *owner,
 		      struct fence **f)
 {
-	struct fence *fence;
 	int r;
 	job->ring = ring;
 
 	if (!f)
 		return -EINVAL;
 
-	r = amd_sched_job_init(&job->base, &ring->sched,
-			       entity, amdgpu_job_timeout_func,
-			       amdgpu_job_free_func, owner, &fence);
+	r = amd_sched_job_init(&job->base, &ring->sched, entity, owner);
 	if (r)
 		return r;
 
 	job->owner = owner;
 	job->ctx = entity->fence_context;
-	*f = fence_get(fence);
+	*f = fence_get(&job->base.s_fence->finished);
+	amdgpu_job_free_resources(job);
 	amd_sched_entity_push_job(&job->base);
 
 	return 0;
@@ -147,8 +144,8 @@ static struct fence *amdgpu_job_dependency(struct amd_sched_job *sched_job)
 	int r;
 
 	r = amdgpu_vm_grab_id(vm, ring, &job->sync,
-			      &job->base.s_fence->base,
-			      &job->vm_id, &job->vm_pd_addr);
+			      &job->base.s_fence->finished,
+			      job);
 	if (r)
 		DRM_ERROR("Error getting VM ID (%d)\n", r);
 
@@ -170,11 +167,7 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 	}
 	job = to_amdgpu_job(sched_job);
 
-	r = amdgpu_sync_wait(&job->sync);
-	if (r) {
-		DRM_ERROR("failed to sync wait (%d)\n", r);
-		return NULL;
-	}
+	BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL));
 
 	trace_amdgpu_sched_run_job(job);
 	r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs,
@@ -185,14 +178,15 @@ static struct fence *amdgpu_job_run(struct amd_sched_job *sched_job)
 	}
 
 err:
+	/* if gpu reset, hw fence will be replaced here */
+	fence_put(job->fence);
 	job->fence = fence;
-	amdgpu_job_free(job);
 	return fence;
 }
 
 const struct amd_sched_backend_ops amdgpu_sched_ops = {
 	.dependency = amdgpu_job_dependency,
 	.run_job = amdgpu_job_run,
-	.begin_job = amd_sched_job_begin,
-	.finish_job = amd_sched_job_finish,
+	.timedout_job = amdgpu_job_timedout,
+	.free_job = amdgpu_job_free_cb
 };
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 40a23704a981..a8efbb54423f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -60,7 +60,10 @@ int amdgpu_driver_unload_kms(struct drm_device *dev)
 	if (adev->rmmio == NULL)
 		goto done_free;
 
-	pm_runtime_get_sync(dev->dev);
+	if (amdgpu_device_is_px(dev)) {
+		pm_runtime_get_sync(dev->dev);
+		pm_runtime_forbid(dev->dev);
+	}
 
 	amdgpu_amdkfd_device_fini(adev);
 
@@ -135,13 +138,75 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	}
 
 out:
-	if (r)
+	if (r) {
+		/* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */
+		if (adev->rmmio && amdgpu_device_is_px(dev))
+			pm_runtime_put_noidle(dev->dev);
 		amdgpu_driver_unload_kms(dev);
-
+	}
 	return r;
 }
 
+static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info,
+				struct drm_amdgpu_query_fw *query_fw,
+				struct amdgpu_device *adev)
+{
+	switch (query_fw->fw_type) {
+	case AMDGPU_INFO_FW_VCE:
+		fw_info->ver = adev->vce.fw_version;
+		fw_info->feature = adev->vce.fb_version;
+		break;
+	case AMDGPU_INFO_FW_UVD:
+		fw_info->ver = adev->uvd.fw_version;
+		fw_info->feature = 0;
+		break;
+	case AMDGPU_INFO_FW_GMC:
+		fw_info->ver = adev->mc.fw_version;
+		fw_info->feature = 0;
+		break;
+	case AMDGPU_INFO_FW_GFX_ME:
+		fw_info->ver = adev->gfx.me_fw_version;
+		fw_info->feature = adev->gfx.me_feature_version;
+		break;
+	case AMDGPU_INFO_FW_GFX_PFP:
+		fw_info->ver = adev->gfx.pfp_fw_version;
+		fw_info->feature = adev->gfx.pfp_feature_version;
+		break;
+	case AMDGPU_INFO_FW_GFX_CE:
+		fw_info->ver = adev->gfx.ce_fw_version;
+		fw_info->feature = adev->gfx.ce_feature_version;
+		break;
+	case AMDGPU_INFO_FW_GFX_RLC:
+		fw_info->ver = adev->gfx.rlc_fw_version;
+		fw_info->feature = adev->gfx.rlc_feature_version;
+		break;
+	case AMDGPU_INFO_FW_GFX_MEC:
+		if (query_fw->index == 0) {
+			fw_info->ver = adev->gfx.mec_fw_version;
+			fw_info->feature = adev->gfx.mec_feature_version;
+		} else if (query_fw->index == 1) {
+			fw_info->ver = adev->gfx.mec2_fw_version;
+			fw_info->feature = adev->gfx.mec2_feature_version;
+		} else
+			return -EINVAL;
+		break;
+	case AMDGPU_INFO_FW_SMC:
+		fw_info->ver = adev->pm.fw_version;
+		fw_info->feature = 0;
+		break;
+	case AMDGPU_INFO_FW_SDMA:
+		if (query_fw->index >= adev->sdma.num_instances)
+			return -EINVAL;
+		fw_info->ver = adev->sdma.instance[query_fw->index].fw_version;
+		fw_info->feature = adev->sdma.instance[query_fw->index].feature_version;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /*
  * Userspace get information ioctl
  */
@@ -288,67 +353,20 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 		return copy_to_user(out, &count, min(size, 4u)) ? -EFAULT : 0;
 	}
 	case AMDGPU_INFO_TIMESTAMP:
-		ui64 = amdgpu_asic_get_gpu_clock_counter(adev);
+		ui64 = amdgpu_gfx_get_gpu_clock_counter(adev);
 		return copy_to_user(out, &ui64, min(size, 8u)) ? -EFAULT : 0;
 	case AMDGPU_INFO_FW_VERSION: {
 		struct drm_amdgpu_info_firmware fw_info;
+		int ret;
 
 		/* We only support one instance of each IP block right now. */
 		if (info->query_fw.ip_instance != 0)
 			return -EINVAL;
 
-		switch (info->query_fw.fw_type) {
-		case AMDGPU_INFO_FW_VCE:
-			fw_info.ver = adev->vce.fw_version;
-			fw_info.feature = adev->vce.fb_version;
-			break;
-		case AMDGPU_INFO_FW_UVD:
-			fw_info.ver = adev->uvd.fw_version;
-			fw_info.feature = 0;
-			break;
-		case AMDGPU_INFO_FW_GMC:
-			fw_info.ver = adev->mc.fw_version;
-			fw_info.feature = 0;
-			break;
-		case AMDGPU_INFO_FW_GFX_ME:
-			fw_info.ver = adev->gfx.me_fw_version;
-			fw_info.feature = adev->gfx.me_feature_version;
-			break;
-		case AMDGPU_INFO_FW_GFX_PFP:
-			fw_info.ver = adev->gfx.pfp_fw_version;
-			fw_info.feature = adev->gfx.pfp_feature_version;
-			break;
-		case AMDGPU_INFO_FW_GFX_CE:
-			fw_info.ver = adev->gfx.ce_fw_version;
-			fw_info.feature = adev->gfx.ce_feature_version;
-			break;
-		case AMDGPU_INFO_FW_GFX_RLC:
-			fw_info.ver = adev->gfx.rlc_fw_version;
-			fw_info.feature = adev->gfx.rlc_feature_version;
-			break;
-		case AMDGPU_INFO_FW_GFX_MEC:
-			if (info->query_fw.index == 0) {
-				fw_info.ver = adev->gfx.mec_fw_version;
-				fw_info.feature = adev->gfx.mec_feature_version;
-			} else if (info->query_fw.index == 1) {
-				fw_info.ver = adev->gfx.mec2_fw_version;
-				fw_info.feature = adev->gfx.mec2_feature_version;
-			} else
-				return -EINVAL;
-			break;
-		case AMDGPU_INFO_FW_SMC:
-			fw_info.ver = adev->pm.fw_version;
-			fw_info.feature = 0;
-			break;
-		case AMDGPU_INFO_FW_SDMA:
-			if (info->query_fw.index >= adev->sdma.num_instances)
-				return -EINVAL;
-			fw_info.ver = adev->sdma.instance[info->query_fw.index].fw_version;
-			fw_info.feature = adev->sdma.instance[info->query_fw.index].feature_version;
-			break;
-		default:
-			return -EINVAL;
-		}
+		ret = amdgpu_firmware_info(&fw_info, &info->query_fw, adev);
+		if (ret)
+			return ret;
+
 		return copy_to_user(out, &fw_info,
 				    min((size_t)size, sizeof(fw_info))) ? -EFAULT : 0;
 	}
@@ -447,7 +465,8 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
 			dev_info.max_memory_clock = adev->pm.default_mclk * 10;
 		}
 		dev_info.enabled_rb_pipes_mask = adev->gfx.config.backend_enable_mask;
-		dev_info.num_rb_pipes = adev->gfx.config.num_rbs;
+		dev_info.num_rb_pipes = adev->gfx.config.max_backends_per_se *
+			adev->gfx.config.max_shader_engines;
 		dev_info.num_hw_gfx_contexts = adev->gfx.config.max_hw_contexts;
 		dev_info._pad = 0;
 		dev_info.ids_flags = 0;
@@ -755,3 +774,130 @@ const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
 	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 const int amdgpu_max_kms_ioctl = ARRAY_SIZE(amdgpu_ioctls_kms);
+
+/*
+ * Debugfs info
+ */
+#if defined(CONFIG_DEBUG_FS)
+
+static int amdgpu_debugfs_firmware_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct amdgpu_device *adev = dev->dev_private;
+	struct drm_amdgpu_info_firmware fw_info;
+	struct drm_amdgpu_query_fw query_fw;
+	int ret, i;
+
+	/* VCE */
+	query_fw.fw_type = AMDGPU_INFO_FW_VCE;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "VCE feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* UVD */
+	query_fw.fw_type = AMDGPU_INFO_FW_UVD;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "UVD feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* GMC */
+	query_fw.fw_type = AMDGPU_INFO_FW_GMC;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "MC feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* ME */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_ME;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "ME feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* PFP */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_PFP;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "PFP feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* CE */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_CE;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "CE feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* RLC */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_RLC;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "RLC feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* MEC */
+	query_fw.fw_type = AMDGPU_INFO_FW_GFX_MEC;
+	query_fw.index = 0;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if (ret)
+		return ret;
+	seq_printf(m, "MEC feature version: %u, firmware version: 0x%08x\n",
+		   fw_info.feature, fw_info.ver);
+
+	/* MEC2 */
+	if (adev->asic_type == CHIP_KAVERI ||
+	    (adev->asic_type > CHIP_TOPAZ && adev->asic_type != CHIP_STONEY)) {
+		query_fw.index = 1;
+		ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+		if (ret)
+			return ret;
+		seq_printf(m, "MEC2 feature version: %u, firmware version: 0x%08x\n",
+			   fw_info.feature, fw_info.ver);
+	}
+
+	/* SMC */
+	query_fw.fw_type = AMDGPU_INFO_FW_SMC;
+	ret = amdgpu_firmware_info(&fw_info, &query_fw, adev);
+	if
(ret) + return ret; + seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + + /* SDMA */ + query_fw.fw_type = AMDGPU_INFO_FW_SDMA; + for (i = 0; i < adev->sdma.num_instances; i++) { + query_fw.index = i; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "SDMA%d feature version: %u, firmware version: 0x%08x\n", + i, fw_info.feature, fw_info.ver); + } + + return 0; +} + +static const struct drm_info_list amdgpu_firmware_info_list[] = { + {"amdgpu_firmware_info", amdgpu_debugfs_firmware_info, 0, NULL}, +}; +#endif + +int amdgpu_debugfs_firmware_init(struct amdgpu_device *adev) +{ +#if defined(CONFIG_DEBUG_FS) + return amdgpu_debugfs_add_files(adev, amdgpu_firmware_info_list, + ARRAY_SIZE(amdgpu_firmware_info_list)); +#else + return 0; +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 7ecea83ce453..6f0873c75a25 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -589,6 +589,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct amdgpu_bo *rbo; + struct ttm_mem_reg *old_mem = &bo->mem; if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) return; @@ -602,6 +603,8 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, /* move_notify is called before move happens */ amdgpu_update_memory_usage(rbo->adev, &bo->mem, new_mem); + + trace_amdgpu_ttm_bo_move(rbo, new_mem->mem_type, old_mem->mem_type); } int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c index 589b36e8c5cf..ff63b88b0ffa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c @@ -270,30 +270,28 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = ddev->dev_private; enum amd_pm_state_type state = 0; - long idx; + unsigned long idx; int ret; if (strlen(buf) == 1) adev->pp_force_state_enabled = false; - else { - ret = kstrtol(buf, 0, &idx); + else if (adev->pp_enabled) { + struct pp_states_info data; - if (ret) { + ret = kstrtoul(buf, 0, &idx); + if (ret || idx >= ARRAY_SIZE(data.states)) { count = -EINVAL; goto fail; } - if (adev->pp_enabled) { - struct pp_states_info data; - amdgpu_dpm_get_pp_num_states(adev, &data); - state = data.states[idx]; - /* only set user selected power states */ - if (state != POWER_STATE_TYPE_INTERNAL_BOOT && - state != POWER_STATE_TYPE_DEFAULT) { - amdgpu_dpm_dispatch_task(adev, - AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); - adev->pp_force_state_enabled = true; - } + amdgpu_dpm_get_pp_num_states(adev, &data); + state = data.states[idx]; + /* only set user selected power states */ + if (state != POWER_STATE_TYPE_INTERNAL_BOOT && + state != POWER_STATE_TYPE_DEFAULT) { + amdgpu_dpm_dispatch_task(adev, + AMD_PP_EVENT_ENABLE_USER_STATE, &state, NULL); + adev->pp_force_state_enabled = true; } } fail: @@ -349,6 +347,8 @@ static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, if (adev->pp_enabled) size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); + else if (adev->pm.funcs->print_clock_levels) + size = adev->pm.funcs->print_clock_levels(adev, PP_SCLK, buf); return size; } @@ -365,7 +365,9 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, uint32_t i, mask = 0; char sub_str[2]; - for (i = 0; i < strlen(buf) - 1; 
i++) { + for (i = 0; i < strlen(buf); i++) { + if (*(buf + i) == '\n') + continue; sub_str[0] = *(buf + i); sub_str[1] = '\0'; ret = kstrtol(sub_str, 0, &level); @@ -379,6 +381,8 @@ static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, if (adev->pp_enabled) amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); + else if (adev->pm.funcs->force_clock_level) + adev->pm.funcs->force_clock_level(adev, PP_SCLK, mask); fail: return count; } @@ -393,6 +397,8 @@ static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, if (adev->pp_enabled) size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); + else if (adev->pm.funcs->print_clock_levels) + size = adev->pm.funcs->print_clock_levels(adev, PP_MCLK, buf); return size; } @@ -409,7 +415,9 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, uint32_t i, mask = 0; char sub_str[2]; - for (i = 0; i < strlen(buf) - 1; i++) { + for (i = 0; i < strlen(buf); i++) { + if (*(buf + i) == '\n') + continue; sub_str[0] = *(buf + i); sub_str[1] = '\0'; ret = kstrtol(sub_str, 0, &level); @@ -423,6 +431,8 @@ static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, if (adev->pp_enabled) amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); + else if (adev->pm.funcs->force_clock_level) + adev->pm.funcs->force_clock_level(adev, PP_MCLK, mask); fail: return count; } @@ -437,6 +447,8 @@ static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, if (adev->pp_enabled) size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); + else if (adev->pm.funcs->print_clock_levels) + size = adev->pm.funcs->print_clock_levels(adev, PP_PCIE, buf); return size; } @@ -453,7 +465,9 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, uint32_t i, mask = 0; char sub_str[2]; - for (i = 0; i < strlen(buf) - 1; i++) { + for (i = 0; i < strlen(buf); i++) { + if (*(buf + i) == '\n') + continue; sub_str[0] = *(buf + i); sub_str[1] = '\0'; ret = kstrtol(sub_str, 0, &level); @@ -467,6 +481,100 @@ static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, if (adev->pp_enabled) amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); + else if (adev->pm.funcs->force_clock_level) + adev->pm.funcs->force_clock_level(adev, PP_PCIE, mask); +fail: + return count; +} + +static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + uint32_t value = 0; + + if (adev->pp_enabled) + value = amdgpu_dpm_get_sclk_od(adev); + else if (adev->pm.funcs->get_sclk_od) + value = adev->pm.funcs->get_sclk_od(adev); + + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + long int value; + + ret = kstrtol(buf, 0, &value); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) { + amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); + amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); + } else if (adev->pm.funcs->set_sclk_od) { + adev->pm.funcs->set_sclk_od(adev, (uint32_t)value); + adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; + amdgpu_pm_compute_clocks(adev); + } + +fail: + return count; +} + +static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = 
ddev->dev_private; + uint32_t value = 0; + + if (adev->pp_enabled) + value = amdgpu_dpm_get_mclk_od(adev); + else if (adev->pm.funcs->get_mclk_od) + value = adev->pm.funcs->get_mclk_od(adev); + + return snprintf(buf, PAGE_SIZE, "%d\n", value); +} + +static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + struct drm_device *ddev = dev_get_drvdata(dev); + struct amdgpu_device *adev = ddev->dev_private; + int ret; + long int value; + + ret = kstrtol(buf, 0, &value); + + if (ret) { + count = -EINVAL; + goto fail; + } + + if (adev->pp_enabled) { + amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); + amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_READJUST_POWER_STATE, NULL, NULL); + } else if (adev->pm.funcs->set_mclk_od) { + adev->pm.funcs->set_mclk_od(adev, (uint32_t)value); + adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; + amdgpu_pm_compute_clocks(adev); + } + fail: return count; } @@ -492,6 +600,12 @@ static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, amdgpu_get_pp_dpm_pcie, amdgpu_set_pp_dpm_pcie); +static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR, + amdgpu_get_pp_sclk_od, + amdgpu_set_pp_sclk_od); +static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR, + amdgpu_get_pp_mclk_od, + amdgpu_set_pp_mclk_od); static ssize_t amdgpu_hwmon_show_temp(struct device *dev, struct device_attribute *attr, @@ -1110,22 +1224,34 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) DRM_ERROR("failed to create device file pp_table\n"); return ret; } - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_sclk\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_mclk\n"); - return ret; - } - ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); - if (ret) { - DRM_ERROR("failed to create device file pp_dpm_pcie\n"); - return ret; - } } + + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_sclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_mclk\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); + if (ret) { + DRM_ERROR("failed to create device file pp_dpm_pcie\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od); + if (ret) { + DRM_ERROR("failed to create device file pp_sclk_od\n"); + return ret; + } + ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od); + if (ret) { + DRM_ERROR("failed to create device file pp_mclk_od\n"); + return ret; + } + ret = amdgpu_debugfs_pm_init(adev); if (ret) { DRM_ERROR("Failed to register debugfs file for dpm!\n"); @@ -1148,10 +1274,12 @@ void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) device_remove_file(adev->dev, &dev_attr_pp_cur_state); device_remove_file(adev->dev, &dev_attr_pp_force_state); device_remove_file(adev->dev, &dev_attr_pp_table); - device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); - device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); - device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); } + device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); + device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); + device_remove_file(adev->dev, &dev_attr_pp_sclk_od); + 
device_remove_file(adev->dev, &dev_attr_pp_mclk_od); } void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c index 6bd961fb43dc..c5738a22b690 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c @@ -52,6 +52,7 @@ static int amdgpu_powerplay_init(struct amdgpu_device *adev) pp_init->chip_family = adev->family; pp_init->chip_id = adev->asic_type; pp_init->device = amdgpu_cgs_create_device(adev); + pp_init->powercontainment_enabled = amdgpu_powercontainment; ret = amd_powerplay_init(pp_init, amd_pp); kfree(pp_init); @@ -183,13 +184,6 @@ static int amdgpu_pp_sw_fini(void *handle) if (ret) return ret; -#ifdef CONFIG_DRM_AMD_POWERPLAY - if (adev->pp_enabled) { - amdgpu_pm_sysfs_fini(adev); - amd_powerplay_fini(adev->powerplay.pp_handle); - } -#endif - return ret; } @@ -223,6 +217,22 @@ static int amdgpu_pp_hw_fini(void *handle) return ret; } +static void amdgpu_pp_late_fini(void *handle) +{ +#ifdef CONFIG_DRM_AMD_POWERPLAY + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pp_enabled) { + amdgpu_pm_sysfs_fini(adev); + amd_powerplay_fini(adev->powerplay.pp_handle); + } + + if (adev->powerplay.ip_funcs->late_fini) + adev->powerplay.ip_funcs->late_fini( + adev->powerplay.pp_handle); +#endif +} + static int amdgpu_pp_suspend(void *handle) { int ret = 0; @@ -311,6 +321,7 @@ const struct amd_ip_funcs amdgpu_pp_ip_funcs = { .sw_fini = amdgpu_pp_sw_fini, .hw_init = amdgpu_pp_hw_init, .hw_fini = amdgpu_pp_hw_fini, + .late_fini = amdgpu_pp_late_fini, .suspend = amdgpu_pp_suspend, .resume = amdgpu_pp_resume, .is_idle = amdgpu_pp_is_idle, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3b02272db678..3b885e3e9b56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -28,6 +28,7 @@ */ #include <linux/seq_file.h> #include <linux/slab.h> +#include <linux/debugfs.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -48,6 +49,7 @@ */ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring); +static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring); /** * amdgpu_ring_alloc - allocate space on the ring buffer @@ -140,78 +142,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring) } /** - * amdgpu_ring_backup - Back up the content of a ring - * - * @ring: the ring we want to back up - * - * Saves all unprocessed commits from a ring, returns the number of dwords saved. 
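[Note: the amdgpu_ring_backup()/amdgpu_ring_restore() pair below is deleted. With the scheduler-driven reset path, unfinished work survives as scheduler jobs (see the "if gpu reset, hw fence will be replaced here" comment in amdgpu_job_run() earlier) and is simply resubmitted, so saving raw ring contents is no longer needed. For reference only, the old flow looked roughly like this, reconstructed from the removed helpers:

	uint32_t *data;
	unsigned size;

	size = amdgpu_ring_backup(ring, &data);	/* save unprocessed dwords */
	/* ... hard reset the ASIC and reinitialize the ring ... */
	if (size)
		amdgpu_ring_restore(ring, size, data);	/* replays and kfree()s data */
]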
- */ -unsigned amdgpu_ring_backup(struct amdgpu_ring *ring, - uint32_t **data) -{ - unsigned size, ptr, i; - - *data = NULL; - - if (ring->ring_obj == NULL) - return 0; - - /* it doesn't make sense to save anything if all fences are signaled */ - if (!amdgpu_fence_count_emitted(ring)) - return 0; - - ptr = le32_to_cpu(*ring->next_rptr_cpu_addr); - - size = ring->wptr + (ring->ring_size / 4); - size -= ptr; - size &= ring->ptr_mask; - if (size == 0) - return 0; - - /* and then save the content of the ring */ - *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); - if (!*data) - return 0; - for (i = 0; i < size; ++i) { - (*data)[i] = ring->ring[ptr++]; - ptr &= ring->ptr_mask; - } - - return size; -} - -/** - * amdgpu_ring_restore - append saved commands to the ring again - * - * @ring: ring to append commands to - * @size: number of dwords we want to write - * @data: saved commands - * - * Allocates space on the ring and restore the previously saved commands. - */ -int amdgpu_ring_restore(struct amdgpu_ring *ring, - unsigned size, uint32_t *data) -{ - int i, r; - - if (!size || !data) - return 0; - - /* restore the saved ring content */ - r = amdgpu_ring_alloc(ring, size); - if (r) - return r; - - for (i = 0; i < size; ++i) { - amdgpu_ring_write(ring, data[i]); - } - - amdgpu_ring_commit(ring); - kfree(data); - return 0; -} - -/** * amdgpu_ring_init - init driver ring struct. * * @adev: amdgpu_device pointer @@ -260,14 +190,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, return r; } - r = amdgpu_wb_get(adev, &ring->next_rptr_offs); - if (r) { - dev_err(adev->dev, "(%d) ring next_rptr wb alloc failed\n", r); - return r; - } - ring->next_rptr_gpu_addr = adev->wb.gpu_addr + ring->next_rptr_offs * 4; - ring->next_rptr_cpu_addr = &adev->wb.wb[ring->next_rptr_offs]; - r = amdgpu_wb_get(adev, &ring->cond_exe_offs); if (r) { dev_err(adev->dev, "(%d) ring cond_exec_polling wb alloc failed\n", r); @@ -310,6 +232,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring, } r = amdgpu_bo_kmap(ring->ring_obj, (void **)&ring->ring); + + memset((void *)ring->ring, 0, ring->ring_size); + amdgpu_bo_unreserve(ring->ring_obj); if (r) { dev_err(adev->dev, "(%d) ring map failed\n", r); @@ -343,10 +268,10 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) ring->ring = NULL; ring->ring_obj = NULL; + amdgpu_wb_free(ring->adev, ring->cond_exe_offs); amdgpu_wb_free(ring->adev, ring->fence_offs); amdgpu_wb_free(ring->adev, ring->rptr_offs); amdgpu_wb_free(ring->adev, ring->wptr_offs); - amdgpu_wb_free(ring->adev, ring->next_rptr_offs); if (ring_obj) { r = amdgpu_bo_reserve(ring_obj, false); @@ -357,6 +282,7 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) } amdgpu_bo_unref(&ring_obj); } + amdgpu_debugfs_ring_fini(ring); } /* @@ -364,57 +290,62 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring) */ #if defined(CONFIG_DEBUG_FS) -static int amdgpu_debugfs_ring_info(struct seq_file *m, void *data) +/* Layout of file is 12 bytes consisting of + * - rptr + * - wptr + * - driver's copy of wptr + * + * followed by n-words of ring data + */ +static ssize_t amdgpu_debugfs_ring_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) { - struct drm_info_node *node = (struct drm_info_node *) m->private; - struct drm_device *dev = node->minor->dev; - struct amdgpu_device *adev = dev->dev_private; - int roffset = (unsigned long)node->info_ent->data; - struct amdgpu_ring *ring = (void *)(((uint8_t*)adev) + roffset); - uint32_t rptr, wptr, rptr_next; - unsigned i; 
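[Note: the seq_file dump removed here is replaced by the raw read handler below. The first 12 bytes of the new amdgpu_ring_<name> debugfs file are rptr, wptr and the driver's wptr copy, followed by the ring contents; reads must be 4-byte sized and aligned. A hypothetical userspace dump, assuming DRM minor 0, the gfx ring, and a mounted debugfs (path and ring name are assumptions, not from the patch):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint32_t hdr[3];	/* rptr, wptr, driver's copy of wptr */
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_ring_gfx", O_RDONLY);

	if (fd < 0 || read(fd, hdr, sizeof(hdr)) != sizeof(hdr))
		return 1;

	printf("rptr 0x%08x wptr 0x%08x driver wptr 0x%08x\n",
	       hdr[0], hdr[1], hdr[2]);
	close(fd);
	return 0;
}
]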
- - wptr = amdgpu_ring_get_wptr(ring); - seq_printf(m, "wptr: 0x%08x [%5d]\n", wptr, wptr); - - rptr = amdgpu_ring_get_rptr(ring); - rptr_next = le32_to_cpu(*ring->next_rptr_cpu_addr); - - seq_printf(m, "rptr: 0x%08x [%5d]\n", rptr, rptr); - - seq_printf(m, "driver's copy of the wptr: 0x%08x [%5d]\n", - ring->wptr, ring->wptr); - - if (!ring->ready) - return 0; - - /* print 8 dw before current rptr as often it's the last executed - * packet that is the root issue - */ - i = (rptr + ring->ptr_mask + 1 - 32) & ring->ptr_mask; - while (i != rptr) { - seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); - if (i == rptr) - seq_puts(m, " *"); - if (i == rptr_next) - seq_puts(m, " #"); - seq_puts(m, "\n"); - i = (i + 1) & ring->ptr_mask; + struct amdgpu_ring *ring = (struct amdgpu_ring*)f->f_inode->i_private; + int r, i; + uint32_t value, result, early[3]; + + if (*pos & 3 || size & 3) + return -EINVAL; + + result = 0; + + if (*pos < 12) { + early[0] = amdgpu_ring_get_rptr(ring); + early[1] = amdgpu_ring_get_wptr(ring); + early[2] = ring->wptr; + for (i = *pos / 4; i < 3 && size; i++) { + r = put_user(early[i], (uint32_t *)buf); + if (r) + return r; + buf += 4; + result += 4; + size -= 4; + *pos += 4; + } } - while (i != wptr) { - seq_printf(m, "r[%5d]=0x%08x", i, ring->ring[i]); - if (i == rptr) - seq_puts(m, " *"); - if (i == rptr_next) - seq_puts(m, " #"); - seq_puts(m, "\n"); - i = (i + 1) & ring->ptr_mask; + + while (size) { + if (*pos >= (ring->ring_size + 12)) + return result; + + value = ring->ring[(*pos - 12)/4]; + r = put_user(value, (uint32_t*)buf); + if (r) + return r; + buf += 4; + result += 4; + size -= 4; + *pos += 4; } - return 0; + + return result; } -static struct drm_info_list amdgpu_debugfs_ring_info_list[AMDGPU_MAX_RINGS]; -static char amdgpu_debugfs_ring_names[AMDGPU_MAX_RINGS][32]; +static const struct file_operations amdgpu_debugfs_ring_fops = { + .owner = THIS_MODULE, + .read = amdgpu_debugfs_ring_read, + .llseek = default_llseek +}; #endif @@ -422,28 +353,27 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring) { #if defined(CONFIG_DEBUG_FS) - unsigned offset = (uint8_t*)ring - (uint8_t*)adev; - unsigned i; - struct drm_info_list *info; - char *name; - - for (i = 0; i < ARRAY_SIZE(amdgpu_debugfs_ring_info_list); ++i) { - info = &amdgpu_debugfs_ring_info_list[i]; - if (!info->data) - break; - } + struct drm_minor *minor = adev->ddev->primary; + struct dentry *ent, *root = minor->debugfs_root; + char name[32]; - if (i == ARRAY_SIZE(amdgpu_debugfs_ring_info_list)) - return -ENOSPC; - - name = &amdgpu_debugfs_ring_names[i][0]; sprintf(name, "amdgpu_ring_%s", ring->name); - info->name = name; - info->show = amdgpu_debugfs_ring_info; - info->driver_features = 0; - info->data = (void*)(uintptr_t)offset; - return amdgpu_debugfs_add_files(adev, info, 1); + ent = debugfs_create_file(name, + S_IFREG | S_IRUGO, root, + ring, &amdgpu_debugfs_ring_fops); + if (IS_ERR(ent)) + return PTR_ERR(ent); + + i_size_write(ent->d_inode, ring->ring_size + 12); + ring->ent = ent; #endif return 0; } + +static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring) +{ +#if defined(CONFIG_DEBUG_FS) + debugfs_remove(ring->ent); +#endif +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c index b16366c2b4a0..d8af37a845f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c @@ -115,6 +115,7 @@ int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev, return r; } r = 
amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr); + memset(sa_manager->cpu_ptr, 0, sa_manager->size); amdgpu_bo_unreserve(sa_manager->bo); return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c index 34a92808bbd4..5c8d3022fb87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c @@ -223,13 +223,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, } /** - * amdgpu_sync_is_idle - test if all fences are signaled + * amdgpu_sync_peek_fence - get the next fence not signaled yet * * @sync: the sync object + * @ring: optional ring to use for test * - * Returns true if all fences in the sync object are signaled. + * Returns the next fence not signaled yet without removing it from the sync + * object. */ -bool amdgpu_sync_is_idle(struct amdgpu_sync *sync) +struct fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync, + struct amdgpu_ring *ring) { struct amdgpu_sync_entry *e; struct hlist_node *tmp; @@ -237,6 +240,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync) hash_for_each_safe(sync->fences, i, tmp, e, node) { struct fence *f = e->fence; + struct amd_sched_fence *s_fence = to_amd_sched_fence(f); + + if (ring && s_fence) { + /* For fences from the same ring it is sufficient + * when they are scheduled. + */ + if (s_fence->sched == &ring->sched) { + if (fence_is_signaled(&s_fence->scheduled)) + continue; + + return &s_fence->scheduled; + } + } if (fence_is_signaled(f)) { hash_del(&e->node); @@ -245,58 +261,19 @@ bool amdgpu_sync_is_idle(struct amdgpu_sync *sync) continue; } - return false; + return f; } - return true; + return NULL; } /** - * amdgpu_sync_cycle_fences - move fences from one sync object into another + * amdgpu_sync_get_fence - get the next fence from the sync object * - * @dst: the destination sync object - * @src: the source sync object - * @fence: fence to add to source + * @sync: sync object to use * - * Remove all fences from source and put them into destination and add - * fence as new one into source. + * Get and removes the next fence from the sync object not signaled yet. 
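[Note: together these two helpers split the old amdgpu_sync_wait() into a non-destructive probe and a destructive drain. amdgpu_sync_peek_fence() reports the next unsignaled fence without removing it (and, given a ring, treats a same-ring scheduler fence as good enough once merely scheduled), while amdgpu_sync_get_fence() removes one fence and hands ownership to the caller. A sketch of the two idioms; a fragment that assumes the surrounding context provides sync and ring:

	/* Non-destructive probe: anything still pending for this ring? */
	struct fence *blocker = amdgpu_sync_peek_fence(sync, ring);

	/* Destructive drain: take and wait out each remaining fence. */
	struct fence *f;
	while ((f = amdgpu_sync_get_fence(sync))) {
		int r = fence_wait(f, false);

		fence_put(f);	/* get_fence transferred the reference to us */
		if (r)
			return r;
	}
]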
*/ -int amdgpu_sync_cycle_fences(struct amdgpu_sync *dst, struct amdgpu_sync *src, - struct fence *fence) -{ - struct amdgpu_sync_entry *e, *newone; - struct hlist_node *tmp; - int i; - - /* Allocate the new entry before moving the old ones */ - newone = kmem_cache_alloc(amdgpu_sync_slab, GFP_KERNEL); - if (!newone) - return -ENOMEM; - - hash_for_each_safe(src->fences, i, tmp, e, node) { - struct fence *f = e->fence; - - hash_del(&e->node); - if (fence_is_signaled(f)) { - fence_put(f); - kmem_cache_free(amdgpu_sync_slab, e); - continue; - } - - if (amdgpu_sync_add_later(dst, f)) { - kmem_cache_free(amdgpu_sync_slab, e); - continue; - } - - hash_add(dst->fences, &e->node, f->context); - } - - hash_add(src->fences, &newone->node, fence->context); - newone->fence = fence_get(fence); - - return 0; -} - struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) { struct amdgpu_sync_entry *e; @@ -319,25 +296,6 @@ struct fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync) return NULL; } -int amdgpu_sync_wait(struct amdgpu_sync *sync) -{ - struct amdgpu_sync_entry *e; - struct hlist_node *tmp; - int i, r; - - hash_for_each_safe(sync->fences, i, tmp, e, node) { - r = fence_wait(e->fence, false); - if (r) - return r; - - hash_del(&e->node); - fence_put(e->fence); - kmem_cache_free(amdgpu_sync_slab, e); - } - - return 0; -} - /** * amdgpu_sync_free - free the sync object * diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 26a5f4acf584..499803f3ce3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -11,19 +11,68 @@ #define TRACE_SYSTEM amdgpu #define TRACE_INCLUDE_FILE amdgpu_trace +TRACE_EVENT(amdgpu_mm_rreg, + TP_PROTO(unsigned did, uint32_t reg, uint32_t value), + TP_ARGS(did, reg, value), + TP_STRUCT__entry( + __field(unsigned, did) + __field(uint32_t, reg) + __field(uint32_t, value) + ), + TP_fast_assign( + __entry->did = did; + __entry->reg = reg; + __entry->value = value; + ), + TP_printk("0x%04lx, 0x%04lx, 0x%08lx", + (unsigned long)__entry->did, + (unsigned long)__entry->reg, + (unsigned long)__entry->value) +); + +TRACE_EVENT(amdgpu_mm_wreg, + TP_PROTO(unsigned did, uint32_t reg, uint32_t value), + TP_ARGS(did, reg, value), + TP_STRUCT__entry( + __field(unsigned, did) + __field(uint32_t, reg) + __field(uint32_t, value) + ), + TP_fast_assign( + __entry->did = did; + __entry->reg = reg; + __entry->value = value; + ), + TP_printk("0x%04lx, 0x%04lx, 0x%08lx", + (unsigned long)__entry->did, + (unsigned long)__entry->reg, + (unsigned long)__entry->value) +); + TRACE_EVENT(amdgpu_bo_create, TP_PROTO(struct amdgpu_bo *bo), TP_ARGS(bo), TP_STRUCT__entry( __field(struct amdgpu_bo *, bo) __field(u32, pages) + __field(u32, type) + __field(u32, prefer) + __field(u32, allow) + __field(u32, visible) ), TP_fast_assign( __entry->bo = bo; __entry->pages = bo->tbo.num_pages; + __entry->type = bo->tbo.mem.mem_type; + __entry->prefer = bo->prefered_domains; + __entry->allow = bo->allowed_domains; + __entry->visible = bo->flags; ), - TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) + + TP_printk("bo=%p,pages=%u,type=%d,prefered=%d,allowed=%d,visible=%d", + __entry->bo, __entry->pages, __entry->type, + __entry->prefer, __entry->allow, __entry->visible) ); TRACE_EVENT(amdgpu_cs, @@ -64,7 +113,7 @@ TRACE_EVENT(amdgpu_cs_ioctl, __entry->adev = job->adev; __entry->sched_job = &job->base; __entry->ib = job->ibs; - __entry->fence = &job->base.s_fence->base; + __entry->fence = 
&job->base.s_fence->finished; __entry->ring_name = job->ring->name; __entry->num_ibs = job->num_ibs; ), @@ -89,7 +138,7 @@ TRACE_EVENT(amdgpu_sched_run_job, __entry->adev = job->adev; __entry->sched_job = &job->base; __entry->ib = job->ibs; - __entry->fence = &job->base.s_fence->base; + __entry->fence = &job->base.s_fence->finished; __entry->ring_name = job->ring->name; __entry->num_ibs = job->num_ibs; ), @@ -244,13 +293,55 @@ TRACE_EVENT(amdgpu_bo_list_set, TP_STRUCT__entry( __field(struct amdgpu_bo_list *, list) __field(struct amdgpu_bo *, bo) + __field(u64, bo_size) ), TP_fast_assign( __entry->list = list; __entry->bo = bo; + __entry->bo_size = amdgpu_bo_size(bo); ), - TP_printk("list=%p, bo=%p", __entry->list, __entry->bo) + TP_printk("list=%p, bo=%p, bo_size = %Ld", + __entry->list, + __entry->bo, + __entry->bo_size) +); + +TRACE_EVENT(amdgpu_cs_bo_status, + TP_PROTO(uint64_t total_bo, uint64_t total_size), + TP_ARGS(total_bo, total_size), + TP_STRUCT__entry( + __field(u64, total_bo) + __field(u64, total_size) + ), + + TP_fast_assign( + __entry->total_bo = total_bo; + __entry->total_size = total_size; + ), + TP_printk("total bo size = %Ld, total bo count = %Ld", + __entry->total_bo, __entry->total_size) +); + +TRACE_EVENT(amdgpu_ttm_bo_move, + TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement), + TP_ARGS(bo, new_placement, old_placement), + TP_STRUCT__entry( + __field(struct amdgpu_bo *, bo) + __field(u64, bo_size) + __field(u32, new_placement) + __field(u32, old_placement) + ), + + TP_fast_assign( + __entry->bo = bo; + __entry->bo_size = amdgpu_bo_size(bo); + __entry->new_placement = new_placement; + __entry->old_placement = old_placement; + ), + TP_printk("bo=%p from:%d to %d with size = %Ld", + __entry->bo, __entry->old_placement, + __entry->new_placement, __entry->bo_size) ); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 3b9053af4762..b7742e62972a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -286,9 +286,10 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, r = amdgpu_copy_buffer(ring, old_start, new_start, new_mem->num_pages * PAGE_SIZE, /* bytes */ bo->resv, &fence); - /* FIXME: handle copy error */ - r = ttm_bo_move_accel_cleanup(bo, fence, - evict, no_wait_gpu, new_mem); + if (r) + return r; + + r = ttm_bo_pipeline_move(bo, fence, evict, new_mem); fence_put(fence); return r; } @@ -396,6 +397,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, return -EINVAL; adev = amdgpu_get_adev(bo->bdev); + + /* remember the eviction */ + if (evict) + atomic64_inc(&adev->num_evictions); + if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { amdgpu_move_null(bo, new_mem); return 0; @@ -429,7 +435,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, evict, interruptible, + no_wait_gpu, new_mem); if (r) { return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 01abfc21b4a2..e19520c4b4b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -253,19 +253,20 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev) { int r; - if (adev->uvd.vcpu_bo == NULL) - return 0; + kfree(adev->uvd.saved_bo); amd_sched_entity_fini(&adev->uvd.ring.sched, &adev->uvd.entity); - r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); - if 
(!r) { - amdgpu_bo_kunmap(adev->uvd.vcpu_bo); - amdgpu_bo_unpin(adev->uvd.vcpu_bo); - amdgpu_bo_unreserve(adev->uvd.vcpu_bo); - } + if (adev->uvd.vcpu_bo) { + r = amdgpu_bo_reserve(adev->uvd.vcpu_bo, false); + if (!r) { + amdgpu_bo_kunmap(adev->uvd.vcpu_bo); + amdgpu_bo_unpin(adev->uvd.vcpu_bo); + amdgpu_bo_unreserve(adev->uvd.vcpu_bo); + } - amdgpu_bo_unref(&adev->uvd.vcpu_bo); + amdgpu_bo_unref(&adev->uvd.vcpu_bo); + } amdgpu_ring_fini(&adev->uvd.ring); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 9f36ed30ba11..2f8496d48c94 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -25,6 +25,7 @@ * Alex Deucher * Jerome Glisse */ +#include <linux/fence-array.h> #include <drm/drmP.h> #include <drm/amdgpu_drm.h> #include "amdgpu.h" @@ -114,16 +115,26 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, /** * amdgpu_vm_get_bos - add the vm BOs to a duplicates list * + * @adev: amdgpu device pointer * @vm: vm providing the BOs * @duplicates: head of duplicates list * * Add the page directory to the BO duplicates list * for command submission. */ -void amdgpu_vm_get_pt_bos(struct amdgpu_vm *vm, struct list_head *duplicates) +void amdgpu_vm_get_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct list_head *duplicates) { + uint64_t num_evictions; unsigned i; + /* We only need to validate the page tables + * if they aren't already valid. + */ + num_evictions = atomic64_read(&adev->num_evictions); + if (num_evictions == vm->last_eviction_counter) + return; + /* add the vm page table to the list */ for (i = 0; i <= vm->max_pde_used; ++i) { struct amdgpu_bo_list_entry *entry = &vm->page_tables[i].entry; @@ -162,6 +173,13 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, spin_unlock(&glob->lru_lock); } +static bool amdgpu_vm_is_gpu_reset(struct amdgpu_device *adev, + struct amdgpu_vm_id *id) +{ + return id->current_gpu_reset_count != + atomic_read(&adev->gpu_reset_counter) ? 
true : false; +} + /** * amdgpu_vm_grab_id - allocate the next free VMID * @@ -174,20 +192,69 @@ void amdgpu_vm_move_pt_bos_in_lru(struct amdgpu_device *adev, */ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, struct amdgpu_sync *sync, struct fence *fence, - unsigned *vm_id, uint64_t *vm_pd_addr) + struct amdgpu_job *job) { - uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory); struct amdgpu_device *adev = ring->adev; struct fence *updates = sync->last_vm_update; - struct amdgpu_vm_id *id; - unsigned i = ring->idx; - int r; + struct amdgpu_vm_id *id, *idle; + struct fence **fences; + unsigned i; + int r = 0; + + fences = kmalloc_array(sizeof(void *), adev->vm_manager.num_ids, + GFP_KERNEL); + if (!fences) + return -ENOMEM; mutex_lock(&adev->vm_manager.lock); + /* Check if we have an idle VMID */ + i = 0; + list_for_each_entry(idle, &adev->vm_manager.ids_lru, list) { + fences[i] = amdgpu_sync_peek_fence(&idle->active, ring); + if (!fences[i]) + break; + ++i; + } + + /* If we can't find a idle VMID to use, wait till one becomes available */ + if (&idle->list == &adev->vm_manager.ids_lru) { + u64 fence_context = adev->vm_manager.fence_context + ring->idx; + unsigned seqno = ++adev->vm_manager.seqno[ring->idx]; + struct fence_array *array; + unsigned j; + + for (j = 0; j < i; ++j) + fence_get(fences[j]); + + array = fence_array_create(i, fences, fence_context, + seqno, true); + if (!array) { + for (j = 0; j < i; ++j) + fence_put(fences[j]); + kfree(fences); + r = -ENOMEM; + goto error; + } + + + r = amdgpu_sync_fence(ring->adev, sync, &array->base); + fence_put(&array->base); + if (r) + goto error; + + mutex_unlock(&adev->vm_manager.lock); + return 0; + + } + kfree(fences); + + job->vm_needs_flush = true; /* Check if we can use a VMID already assigned to this VM */ + i = ring->idx; do { struct fence *flushed; + bool same_ring = ring->idx == i; id = vm->ids[i++]; if (i == AMDGPU_MAX_RINGS) @@ -196,67 +263,49 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, /* Check all the prerequisites to using this VMID */ if (!id) continue; + if (amdgpu_vm_is_gpu_reset(adev, id)) + continue; if (atomic64_read(&id->owner) != vm->client_id) continue; - if (pd_addr != id->pd_gpu_addr) + if (job->vm_pd_addr != id->pd_gpu_addr) continue; - if (id->last_user != ring && + if (!same_ring && (!id->last_flush || !fence_is_signaled(id->last_flush))) continue; flushed = id->flushed_updates; - if (updates && (!flushed || fence_is_later(updates, flushed))) + if (updates && + (!flushed || fence_is_later(updates, flushed))) continue; - /* Good we can use this VMID */ - if (id->last_user == ring) { - r = amdgpu_sync_fence(ring->adev, sync, - id->first); - if (r) - goto error; - } - - /* And remember this submission as user of the VMID */ + /* Good we can use this VMID. Remember this submission as + * user of the VMID. 
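[Note on the no-idle-VMID path above: fence_array_create() takes over both the fences[] array and the fence references, which is why the code grabs an extra reference on each fence first and only kfree()s the array on failure; with the final argument true (signal on any) the array signals as soon as the first VMID goes idle. A condensed sketch of that wait, with hypothetical local names n/fences/fence_context/seqno standing in for the values computed above:

	struct fence_array *array;
	unsigned j;

	for (j = 0; j < n; ++j)
		fence_get(fences[j]);	/* the array takes over these references */

	array = fence_array_create(n, fences, fence_context, seqno,
				   true /* signal on any */);
	if (!array) {
		for (j = 0; j < n; ++j)
			fence_put(fences[j]);
		kfree(fences);
		return -ENOMEM;
	}

	r = amdgpu_sync_fence(adev, sync, &array->base);
	fence_put(&array->base);
]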
+ */ r = amdgpu_sync_fence(ring->adev, &id->active, fence); if (r) goto error; + id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); list_move_tail(&id->list, &adev->vm_manager.ids_lru); vm->ids[ring->idx] = id; - *vm_id = id - adev->vm_manager.ids; - *vm_pd_addr = AMDGPU_VM_NO_FLUSH; - trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr); + job->vm_id = id - adev->vm_manager.ids; + job->vm_needs_flush = false; + trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr); mutex_unlock(&adev->vm_manager.lock); return 0; } while (i != ring->idx); - id = list_first_entry(&adev->vm_manager.ids_lru, - struct amdgpu_vm_id, - list); + /* Still no ID to use? Then use the idle one found earlier */ + id = idle; - if (!amdgpu_sync_is_idle(&id->active)) { - struct list_head *head = &adev->vm_manager.ids_lru; - struct amdgpu_vm_id *tmp; - - list_for_each_entry_safe(id, tmp, &adev->vm_manager.ids_lru, - list) { - if (amdgpu_sync_is_idle(&id->active)) { - list_move(&id->list, head); - head = &id->list; - } - } - id = list_first_entry(&adev->vm_manager.ids_lru, - struct amdgpu_vm_id, - list); - } - - r = amdgpu_sync_cycle_fences(sync, &id->active, fence); + /* Remember this submission as user of the VMID */ + r = amdgpu_sync_fence(ring->adev, &id->active, fence); if (r) goto error; @@ -269,22 +318,46 @@ int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring, fence_put(id->flushed_updates); id->flushed_updates = fence_get(updates); - id->pd_gpu_addr = pd_addr; - + id->pd_gpu_addr = job->vm_pd_addr; + id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter); list_move_tail(&id->list, &adev->vm_manager.ids_lru); - id->last_user = ring; atomic64_set(&id->owner, vm->client_id); vm->ids[ring->idx] = id; - *vm_id = id - adev->vm_manager.ids; - *vm_pd_addr = pd_addr; - trace_amdgpu_vm_grab_id(vm, ring->idx, *vm_id, *vm_pd_addr); + job->vm_id = id - adev->vm_manager.ids; + trace_amdgpu_vm_grab_id(vm, ring->idx, job->vm_id, job->vm_pd_addr); error: mutex_unlock(&adev->vm_manager.lock); return r; } +static bool amdgpu_vm_ring_has_compute_vm_bug(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + const struct amdgpu_ip_block_version *ip_block; + + if (ring->type != AMDGPU_RING_TYPE_COMPUTE) + /* only compute rings */ + return false; + + ip_block = amdgpu_get_ip_block(adev, AMD_IP_BLOCK_TYPE_GFX); + if (!ip_block) + return false; + + if (ip_block->major <= 7) { + /* gfx7 has no workaround */ + return true; + } else if (ip_block->major == 8) { + if (adev->gfx.mec_fw_version >= 673) + /* gfx8 is fixed in MEC firmware 673 */ + return false; + else + return true; + } + return false; +} + /** * amdgpu_vm_flush - hardware flush the vm * @@ -294,59 +367,52 @@ error: * * Emit a VM flush when it is necessary. 
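[Note: amdgpu_vm_ring_has_compute_vm_bug() above narrows the old blanket "always pipeline-sync on compute rings" rule: gfx7 compute rings always need the sync, while gfx8 is considered fixed from MEC firmware version 673 on. The flush below then gates the sync on state carried in the job rather than on the AMDGPU_VM_NO_FLUSH sentinel; condensed from the hunk that follows:

	if (ring->funcs->emit_pipeline_sync &&
	    (job->vm_needs_flush || gds_switch_needed ||
	     amdgpu_vm_ring_has_compute_vm_bug(ring)))
		amdgpu_ring_emit_pipeline_sync(ring);
]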
*/ -int amdgpu_vm_flush(struct amdgpu_ring *ring, - unsigned vm_id, uint64_t pd_addr, - uint32_t gds_base, uint32_t gds_size, - uint32_t gws_base, uint32_t gws_size, - uint32_t oa_base, uint32_t oa_size) +int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_vm_id *id = &adev->vm_manager.ids[vm_id]; + struct amdgpu_vm_id *id = &adev->vm_manager.ids[job->vm_id]; bool gds_switch_needed = ring->funcs->emit_gds_switch && ( - id->gds_base != gds_base || - id->gds_size != gds_size || - id->gws_base != gws_base || - id->gws_size != gws_size || - id->oa_base != oa_base || - id->oa_size != oa_size); + id->gds_base != job->gds_base || + id->gds_size != job->gds_size || + id->gws_base != job->gws_base || + id->gws_size != job->gws_size || + id->oa_base != job->oa_base || + id->oa_size != job->oa_size); int r; if (ring->funcs->emit_pipeline_sync && ( - pd_addr != AMDGPU_VM_NO_FLUSH || gds_switch_needed || - ring->type == AMDGPU_RING_TYPE_COMPUTE)) + job->vm_needs_flush || gds_switch_needed || + amdgpu_vm_ring_has_compute_vm_bug(ring))) amdgpu_ring_emit_pipeline_sync(ring); - if (ring->funcs->emit_vm_flush && - pd_addr != AMDGPU_VM_NO_FLUSH) { + if (ring->funcs->emit_vm_flush && (job->vm_needs_flush || + amdgpu_vm_is_gpu_reset(adev, id))) { struct fence *fence; - trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id); - amdgpu_ring_emit_vm_flush(ring, vm_id, pd_addr); + trace_amdgpu_vm_flush(job->vm_pd_addr, ring->idx, job->vm_id); + amdgpu_ring_emit_vm_flush(ring, job->vm_id, job->vm_pd_addr); + + r = amdgpu_fence_emit(ring, &fence); + if (r) + return r; mutex_lock(&adev->vm_manager.lock); - if ((id->pd_gpu_addr == pd_addr) && (id->last_user == ring)) { - r = amdgpu_fence_emit(ring, &fence); - if (r) { - mutex_unlock(&adev->vm_manager.lock); - return r; - } - fence_put(id->last_flush); - id->last_flush = fence; - } + fence_put(id->last_flush); + id->last_flush = fence; mutex_unlock(&adev->vm_manager.lock); } if (gds_switch_needed) { - id->gds_base = gds_base; - id->gds_size = gds_size; - id->gws_base = gws_base; - id->gws_size = gws_size; - id->oa_base = oa_base; - id->oa_size = oa_size; - amdgpu_ring_emit_gds_switch(ring, vm_id, - gds_base, gds_size, - gws_base, gws_size, - oa_base, oa_size); + id->gds_base = job->gds_base; + id->gds_size = job->gds_size; + id->gws_base = job->gws_base; + id->gws_size = job->gws_size; + id->oa_base = job->oa_base; + id->oa_size = job->oa_size; + amdgpu_ring_emit_gds_switch(ring, job->vm_id, + job->gds_base, job->gds_size, + job->gws_base, job->gws_size, + job->oa_base, job->oa_size); } return 0; @@ -723,7 +789,7 @@ static void amdgpu_vm_frag_ptes(struct amdgpu_device *adev, * @vm: requested vm * @start: start of GPU address range * @end: end of GPU address range - * @dst: destination address to map to + * @dst: destination address to map to, the next dst inside the function * @flags: mapping flags * * Update the page tables in the range @start - @end. 
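[Note: the rewritten walk below keeps a current span (cur_pe_start, cur_pe_end, cur_dst) and only calls amdgpu_vm_frag_ptes() when the next page table's entries are not physically contiguous with that span, so an update crossing into an adjacent page table is batched into a single call. The merge test, condensed from the hunk that follows (mask is AMDGPU_VM_PTE_COUNT - 1, entries are 8 bytes):

	next_pe_start = amdgpu_bo_gpu_offset(pt) + (addr & mask) * 8;

	if (cur_pe_end == next_pe_start) {
		/* Consecutive page tables: grow the pending span. */
		cur_pe_end += 8 * nptes;
	} else {
		/* Gap: flush the pending span, then start a new one. */
		amdgpu_vm_frag_ptes(adev, vm_update_params,
				    cur_pe_start, cur_pe_end, cur_dst, flags);
		cur_pe_start = next_pe_start;
		cur_pe_end = next_pe_start + 8 * nptes;
		cur_dst = dst;
	}
]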
@@ -737,49 +803,75 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, { const uint64_t mask = AMDGPU_VM_PTE_COUNT - 1; - uint64_t last_pe_start = ~0, last_pe_end = ~0, last_dst = ~0; - uint64_t addr; + uint64_t cur_pe_start, cur_pe_end, cur_dst; + uint64_t addr; /* next GPU address to be updated */ + uint64_t pt_idx; + struct amdgpu_bo *pt; + unsigned nptes; /* next number of ptes to be updated */ + uint64_t next_pe_start; + + /* initialize the variables */ + addr = start; + pt_idx = addr >> amdgpu_vm_block_size; + pt = vm->page_tables[pt_idx].entry.robj; + + if ((addr & ~mask) == (end & ~mask)) + nptes = end - addr; + else + nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); + + cur_pe_start = amdgpu_bo_gpu_offset(pt); + cur_pe_start += (addr & mask) * 8; + cur_pe_end = cur_pe_start + 8 * nptes; + cur_dst = dst; + + /* for next ptb*/ + addr += nptes; + dst += nptes * AMDGPU_GPU_PAGE_SIZE; /* walk over the address space and update the page tables */ - for (addr = start; addr < end; ) { - uint64_t pt_idx = addr >> amdgpu_vm_block_size; - struct amdgpu_bo *pt = vm->page_tables[pt_idx].entry.robj; - unsigned nptes; - uint64_t pe_start; + while (addr < end) { + pt_idx = addr >> amdgpu_vm_block_size; + pt = vm->page_tables[pt_idx].entry.robj; if ((addr & ~mask) == (end & ~mask)) nptes = end - addr; else nptes = AMDGPU_VM_PTE_COUNT - (addr & mask); - pe_start = amdgpu_bo_gpu_offset(pt); - pe_start += (addr & mask) * 8; - - if (last_pe_end != pe_start) { + next_pe_start = amdgpu_bo_gpu_offset(pt); + next_pe_start += (addr & mask) * 8; + if (cur_pe_end == next_pe_start) { + /* The next ptb is consecutive to current ptb. + * Don't call amdgpu_vm_frag_ptes now. + * Will update two ptbs together in future. + */ + cur_pe_end += 8 * nptes; + } else { amdgpu_vm_frag_ptes(adev, vm_update_params, - last_pe_start, last_pe_end, - last_dst, flags); + cur_pe_start, cur_pe_end, + cur_dst, flags); - last_pe_start = pe_start; - last_pe_end = pe_start + 8 * nptes; - last_dst = dst; - } else { - last_pe_end += 8 * nptes; + cur_pe_start = next_pe_start; + cur_pe_end = next_pe_start + 8 * nptes; + cur_dst = dst; } + /* for next ptb*/ addr += nptes; dst += nptes * AMDGPU_GPU_PAGE_SIZE; } - amdgpu_vm_frag_ptes(adev, vm_update_params, last_pe_start, - last_pe_end, last_dst, flags); + amdgpu_vm_frag_ptes(adev, vm_update_params, cur_pe_start, + cur_pe_end, cur_dst, flags); } /** * amdgpu_vm_bo_update_mapping - update a mapping in the vm page table * * @adev: amdgpu_device pointer + * @exclusive: fence we need to sync to * @src: address where to copy page table entries from * @pages_addr: DMA addresses to use for mapping * @vm: requested vm @@ -793,6 +885,7 @@ static void amdgpu_vm_update_ptes(struct amdgpu_device *adev, * Returns 0 for success, -EINVAL for failure. 
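[Note: the new exclusive parameter threads the BO's exclusive (write) fence from its reservation object into the page-table update job, so the update cannot be scheduled ahead of a still-pending move of that BO. The callers pick it up as sketched here, based on the amdgpu_vm_bo_update() hunk further down:

	/* Fetch the write fence before scheduling the PT update. */
	struct fence *exclusive =
		reservation_object_get_excl(bo_va->bo->tbo.resv);

	r = amdgpu_vm_bo_split_mapping(adev, exclusive, gtt_flags, pages_addr,
				       vm, mapping, flags, addr,
				       &bo_va->last_pt_update);
]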
*/ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, + struct fence *exclusive, uint64_t src, dma_addr_t *pages_addr, struct amdgpu_vm *vm, @@ -853,6 +946,10 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, vm_update_params.ib = &job->ibs[0]; + r = amdgpu_sync_fence(adev, &job->sync, exclusive); + if (r) + goto error_free; + r = amdgpu_sync_resv(adev, &job->sync, vm->page_directory->tbo.resv, owner); if (r) @@ -889,6 +986,7 @@ error_free: * amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks * * @adev: amdgpu_device pointer + * @exclusive: fence we need to sync to * @gtt_flags: flags as they are used for GTT * @pages_addr: DMA addresses to use for mapping * @vm: requested vm @@ -902,6 +1000,7 @@ error_free: * Returns 0 for success, -EINVAL for failure. */ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, + struct fence *exclusive, uint32_t gtt_flags, dma_addr_t *pages_addr, struct amdgpu_vm *vm, @@ -932,7 +1031,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, addr += mapping->offset; if (!pages_addr || src) - return amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm, + return amdgpu_vm_bo_update_mapping(adev, exclusive, + src, pages_addr, vm, start, mapping->it.last, flags, addr, fence); @@ -940,7 +1040,8 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev, uint64_t last; last = min((uint64_t)mapping->it.last, start + max_size - 1); - r = amdgpu_vm_bo_update_mapping(adev, src, pages_addr, vm, + r = amdgpu_vm_bo_update_mapping(adev, exclusive, + src, pages_addr, vm, start, last, flags, addr, fence); if (r) @@ -973,6 +1074,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping *mapping; dma_addr_t *pages_addr = NULL; uint32_t gtt_flags, flags; + struct fence *exclusive; uint64_t addr; int r; @@ -994,8 +1096,11 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, default: break; } + + exclusive = reservation_object_get_excl(bo_va->bo->tbo.resv); } else { addr = 0; + exclusive = NULL; } flags = amdgpu_ttm_tt_pte_flags(adev, bo_va->bo->tbo.ttm, mem); @@ -1007,7 +1112,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, spin_unlock(&vm->status_lock); list_for_each_entry(mapping, &bo_va->invalids, list) { - r = amdgpu_vm_bo_split_mapping(adev, gtt_flags, pages_addr, vm, + r = amdgpu_vm_bo_split_mapping(adev, exclusive, + gtt_flags, pages_addr, vm, mapping, flags, addr, &bo_va->last_pt_update); if (r) @@ -1054,7 +1160,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev, struct amdgpu_bo_va_mapping, list); list_del(&mapping->list); - r = amdgpu_vm_bo_split_mapping(adev, 0, NULL, vm, mapping, + r = amdgpu_vm_bo_split_mapping(adev, NULL, 0, NULL, vm, mapping, 0, 0, NULL); kfree(mapping); if (r) @@ -1445,6 +1551,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) amdgpu_bo_unreserve(vm->page_directory); if (r) goto error_free_page_directory; + vm->last_eviction_counter = atomic64_read(&adev->num_evictions); return 0; @@ -1516,6 +1623,10 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) &adev->vm_manager.ids_lru); } + adev->vm_manager.fence_context = fence_context_alloc(AMDGPU_MAX_RINGS); + for (i = 0; i < AMDGPU_MAX_RINGS; ++i) + adev->vm_manager.seqno[i] = 0; + atomic_set(&adev->vm_manager.vm_pte_next_ring, 0); atomic64_set(&adev->vm_manager.client_counter, 0); } diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c index ea407db1fbcf..5c33ed862695 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c @@ -50,7 +50,9 @@ #include "gmc/gmc_7_1_sh_mask.h" MODULE_FIRMWARE("radeon/bonaire_smc.bin"); +MODULE_FIRMWARE("radeon/bonaire_k_smc.bin"); MODULE_FIRMWARE("radeon/hawaii_smc.bin"); +MODULE_FIRMWARE("radeon/hawaii_k_smc.bin"); #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b @@ -736,19 +738,19 @@ static int ci_enable_didt(struct amdgpu_device *adev, bool enable) if (pi->caps_sq_ramping || pi->caps_db_ramping || pi->caps_td_ramping || pi->caps_tcp_ramping) { - gfx_v7_0_enter_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->enter_safe_mode(adev); if (enable) { ret = ci_program_pt_config_registers(adev, didt_config_ci); if (ret) { - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return ret; } } ci_do_enable_didt(adev, enable); - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } return 0; @@ -3636,6 +3638,10 @@ static int ci_setup_default_dpm_tables(struct amdgpu_device *adev) ci_setup_default_pcie_tables(adev); + /* save a copy of the default DPM table */ + memcpy(&(pi->golden_dpm_table), &(pi->dpm_table), + sizeof(struct ci_dpm_table)); + return 0; } @@ -5754,10 +5760,18 @@ static int ci_dpm_init_microcode(struct amdgpu_device *adev) switch (adev->asic_type) { case CHIP_BONAIRE: - chip_name = "bonaire"; + if ((adev->pdev->revision == 0x80) || + (adev->pdev->revision == 0x81) || + (adev->pdev->device == 0x665f)) + chip_name = "bonaire_k"; + else + chip_name = "bonaire"; break; case CHIP_HAWAII: - chip_name = "hawaii"; + if (adev->pdev->revision == 0x80) + chip_name = "hawaii_k"; + else + chip_name = "hawaii"; break; case CHIP_KAVERI: case CHIP_KABINI: @@ -6221,6 +6235,9 @@ static int ci_dpm_sw_fini(void *handle) ci_dpm_fini(adev); mutex_unlock(&adev->pm.mutex); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } @@ -6401,6 +6418,186 @@ static int ci_dpm_set_powergating_state(void *handle, return 0; } +static int ci_dpm_print_clock_levels(struct amdgpu_device *adev, + enum pp_clock_type type, char *buf) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table; + struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table; + struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table; + + int i, now, size = 0; + uint32_t clock, pcie_speed; + + switch (type) { + case PP_SCLK: + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency); + clock = RREG32(mmSMC_MSG_ARG_0); + + for (i = 0; i < sclk_table->count; i++) { + if (clock > sclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < sclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, sclk_table->dpm_levels[i].value / 100, + (i == now) ? "*" : ""); + break; + case PP_MCLK: + amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency); + clock = RREG32(mmSMC_MSG_ARG_0); + + for (i = 0; i < mclk_table->count; i++) { + if (clock > mclk_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < mclk_table->count; i++) + size += sprintf(buf + size, "%d: %uMhz %s\n", + i, mclk_table->dpm_levels[i].value / 100, + (i == now) ? 
"*" : ""); + break; + case PP_PCIE: + pcie_speed = ci_get_current_pcie_speed(adev); + for (i = 0; i < pcie_table->count; i++) { + if (pcie_speed != pcie_table->dpm_levels[i].value) + continue; + break; + } + now = i; + + for (i = 0; i < pcie_table->count; i++) + size += sprintf(buf + size, "%d: %s %s\n", i, + (pcie_table->dpm_levels[i].value == 0) ? "2.5GB, x1" : + (pcie_table->dpm_levels[i].value == 1) ? "5.0GB, x16" : + (pcie_table->dpm_levels[i].value == 2) ? "8.0GB, x16" : "", + (i == now) ? "*" : ""); + break; + default: + break; + } + + return size; +} + +static int ci_dpm_force_clock_level(struct amdgpu_device *adev, + enum pp_clock_type type, uint32_t mask) +{ + struct ci_power_info *pi = ci_get_pi(adev); + + if (adev->pm.dpm.forced_level + != AMDGPU_DPM_FORCED_LEVEL_MANUAL) + return -EINVAL; + + switch (type) { + case PP_SCLK: + if (!pi->sclk_dpm_key_disabled) + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask); + break; + + case PP_MCLK: + if (!pi->mclk_dpm_key_disabled) + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_MCLKDPM_SetEnabledMask, + pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask); + break; + + case PP_PCIE: + { + uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask; + uint32_t level = 0; + + while (tmp >>= 1) + level++; + + if (!pi->pcie_dpm_key_disabled) + amdgpu_ci_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_PCIeDPM_ForceLevel, + level); + break; + } + default: + break; + } + + return 0; +} + +static int ci_dpm_get_sclk_od(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table); + struct ci_single_dpm_table *golden_sclk_table = + &(pi->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps); + struct ci_single_dpm_table *golden_sclk_table = + &(pi->golden_dpm_table.sclk_table); + + if (value > 20) + value = 20; + + ps->performance_levels[ps->performance_level_count - 1].sclk = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int ci_dpm_get_mclk_od(struct amdgpu_device *adev) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table); + struct ci_single_dpm_table *golden_mclk_table = + &(pi->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) +{ + struct ci_power_info *pi = ci_get_pi(adev); + struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps); + struct ci_single_dpm_table *golden_mclk_table = + &(pi->golden_dpm_table.mclk_table); + + if (value > 20) + value = 20; + + ps->performance_levels[ps->performance_level_count - 1].mclk = + 
golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} + const struct amd_ip_funcs ci_dpm_ip_funcs = { .name = "ci_dpm", .early_init = ci_dpm_early_init, @@ -6435,6 +6632,12 @@ static const struct amdgpu_dpm_funcs ci_dpm_funcs = { .get_fan_control_mode = &ci_dpm_get_fan_control_mode, .set_fan_speed_percent = &ci_dpm_set_fan_speed_percent, .get_fan_speed_percent = &ci_dpm_get_fan_speed_percent, + .print_clock_levels = ci_dpm_print_clock_levels, + .force_clock_level = ci_dpm_force_clock_level, + .get_sclk_od = ci_dpm_get_sclk_od, + .set_sclk_od = ci_dpm_set_sclk_od, + .get_mclk_od = ci_dpm_get_mclk_od, + .set_mclk_od = ci_dpm_set_mclk_od, }; static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h index faccc30c93bf..91be2996ae7c 100644 --- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.h +++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.h @@ -193,6 +193,7 @@ struct ci_pt_defaults { struct ci_power_info { struct ci_dpm_table dpm_table; + struct ci_dpm_table golden_dpm_table; u32 voltage_control; u32 mvdd_control; u32 vddci_control; diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 07bc795a4ca9..a7de4d18ac94 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -962,6 +962,12 @@ static bool cik_read_bios_from_rom(struct amdgpu_device *adev, return true; } +static u32 cik_get_virtual_caps(struct amdgpu_device *adev) +{ + /* CIK does not support SR-IOV */ + return 0; +} + static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] = { {mmGRBM_STATUS, false}, {mmGB_ADDR_CONFIG, false}, @@ -1029,12 +1035,12 @@ static uint32_t cik_read_indexed_register(struct amdgpu_device *adev, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v7_0_select_se_sh(adev, se_num, sh_num); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); return val; } @@ -1152,10 +1158,11 @@ static void kv_restore_regs_for_reset(struct amdgpu_device *adev, WREG32(mmGMCON_RENG_EXECUTE, save->gmcon_reng_execute); } -static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) +static int cik_gpu_pci_config_reset(struct amdgpu_device *adev) { struct kv_reset_save_regs kv_save = { 0 }; u32 i; + int r = -EINVAL; dev_info(adev->dev, "GPU pci config reset\n"); @@ -1171,14 +1178,20 @@ static void cik_gpu_pci_config_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) + if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { + /* enable BM */ + pci_set_master(adev->pdev); + r = 0; break; + } udelay(1); } /* does asic init need to be run first??? 
*/ if (adev->flags & AMD_IS_APU) kv_restore_regs_for_reset(adev, &kv_save); + + return r; } static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) @@ -1204,13 +1217,14 @@ static void cik_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hu */ static int cik_asic_reset(struct amdgpu_device *adev) { + int r; cik_set_bios_scratch_engine_hung(adev, true); - cik_gpu_pci_config_reset(adev); + r = cik_gpu_pci_config_reset(adev); cik_set_bios_scratch_engine_hung(adev, false); - return 0; + return r; } static int cik_set_uvd_clock(struct amdgpu_device *adev, u32 clock, @@ -2007,9 +2021,7 @@ static const struct amdgpu_asic_funcs cik_asic_funcs = .get_xclk = &cik_get_xclk, .set_uvd_clocks = &cik_set_uvd_clocks, .set_vce_clocks = &cik_set_vce_clocks, - /* these should be moved to their own ip modules */ - .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, - .wait_for_mc_idle = &gmc_v7_0_mc_wait_for_idle, + .get_virtual_caps = &cik_get_virtual_caps, }; static int cik_common_early_init(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c index 518dca43b133..46aca16a40aa 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c @@ -66,6 +66,16 @@ MODULE_FIRMWARE("radeon/mullins_sdma1.bin"); u32 amdgpu_cik_gpu_check_soft_reset(struct amdgpu_device *adev); + +static void cik_sdma_free_microcode(struct amdgpu_device *adev) +{ + int i; + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + } +} + /* * sDMA - System DMA * Starting with CIK, the GPU has new asynchronous @@ -214,17 +224,6 @@ static void cik_sdma_ring_emit_ib(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 extra_bits = vm_id & 0xf; - u32 next_rptr = ring->wptr + 5; - - while ((next_rptr & 7) != 4) - next_rptr++; - - next_rptr += 4; - amdgpu_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, 1); /* number of DWs to follow */ - amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ cik_sdma_ring_insert_nop(ring, (12 - (ring->wptr & 7)) % 8); @@ -419,6 +418,8 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], @@ -446,7 +447,12 @@ static int cik_sdma_gfx_resume(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; + } + + cik_sdma_enable(adev, true); + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; @@ -529,8 +535,8 @@ static int cik_sdma_start(struct amdgpu_device *adev) if (r) return r; - /* unhalt the MEs */ - cik_sdma_enable(adev, true); + /* halt the engine before programming */ + cik_sdma_enable(adev, false); /* start the gfx rings and rlc compute queues */ r = cik_sdma_gfx_resume(adev); @@ -998,6 +1004,7 @@ static int cik_sdma_sw_fini(void *handle) for (i = 0; i 
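[Editor's note: with the cik.c change above, cik_gpu_pci_config_reset now reports success or failure instead of returning void: CONFIG_MEMSIZE reads back as 0xffffffff while the ASIC is still in reset, bus mastering is re-enabled once it recovers, and -EINVAL is returned on timeout so cik_asic_reset can propagate it. A toy model of the poll loop, with a fake register standing in for the MMIO read:

#include <stdio.h>
#include <errno.h>

#define USEC_TIMEOUT 100000

static unsigned polls;

/* fake register: reads as all-ones until the "ASIC" comes out of reset */
static unsigned read_memsize(void)
{
        return ++polls < 2000 ? 0xffffffff : 0x4000;
}

static int wait_reset_done(void)
{
        unsigned i;

        for (i = 0; i < USEC_TIMEOUT; i++) {
                if (read_memsize() != 0xffffffff)
                        return 0;       /* the driver also calls pci_set_master() here */
                /* udelay(1) in the real loop */
        }
        return -EINVAL;
}

int main(void)
{
        printf("reset %s\n", wait_reset_done() ? "timed out" : "complete");
        return 0;
}

End of note.]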
< adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + cik_sdma_free_microcode(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c index 933e425a8154..8ba07e79d4cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c @@ -2219,6 +2219,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate) } } } else { /*pi->caps_vce_pg*/ + pi->vce_power_gated = gate; cz_update_vce_dpm(adev); cz_enable_vce_dpm(adev, !gate); } diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index c90408bc0fde..d4bf133908b1 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -307,11 +307,10 @@ static void dce_v11_0_page_flip(struct amdgpu_device *adev, struct amdgpu_crtc *amdgpu_crtc = adev->mode_info.crtcs[crtc_id]; u32 tmp; - /* flip at hsync for async, default is vsync */ - /* use UPDATE_IMMEDIATE_EN instead for async? */ + /* flip immediate for async, default is vsync */ tmp = RREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset); tmp = REG_SET_FIELD(tmp, GRPH_FLIP_CONTROL, - GRPH_SURFACE_UPDATE_H_RETRACE_EN, async ? 1 : 0); + GRPH_SURFACE_UPDATE_IMMEDIATE_EN, async ? 1 : 0); WREG32(mmGRPH_FLIP_CONTROL + amdgpu_crtc->crtc_offset, tmp); /* update the scanout addresses */ WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + amdgpu_crtc->crtc_offset, diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 300ff4aab0fd..4fdfab1e9200 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -526,36 +526,16 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev, crtc_enabled = REG_GET_FIELD(RREG32(mmCRTC_CONTROL + crtc_offsets[i]), CRTC_CONTROL, CRTC_MASTER_EN); if (crtc_enabled) { -#if 0 - u32 frame_count; - int j; - +#if 1 save->crtc_enabled[i] = true; tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); if (REG_GET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN) == 0) { - amdgpu_display_vblank_wait(adev, i); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); + /* it is correct only for RGB; black is 0 */ + WREG32(mmCRTC_BLANK_DATA_COLOR + crtc_offsets[i], 0); tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1); WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - } - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 1); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK) == 0) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 1); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); } + mdelay(20); #else /* XXX this is a hack to avoid strange behavior with EFI on certain systems */ WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); @@ -575,55 +555,22 @@ static void dce_v8_0_stop_mc_access(struct amdgpu_device *adev, static void dce_v8_0_resume_mc_access(struct amdgpu_device *adev, struct amdgpu_mode_mc_save *save) { - u32 tmp, frame_count; - int i, j; + u32 tmp; + 
int i; /* update crtc base addresses */ for (i = 0; i < adev->mode_info.num_crtc; i++) { WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], upper_32_bits(adev->mc.vram_start)); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS_HIGH + crtc_offsets[i], - upper_32_bits(adev->mc.vram_start)); WREG32(mmGRPH_PRIMARY_SURFACE_ADDRESS + crtc_offsets[i], (u32)adev->mc.vram_start); - WREG32(mmGRPH_SECONDARY_SURFACE_ADDRESS + crtc_offsets[i], - (u32)adev->mc.vram_start); if (save->crtc_enabled[i]) { - tmp = RREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE) != 3) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_MODE, MASTER_UPDATE_MODE, 3); - WREG32(mmMASTER_UPDATE_MODE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, GRPH_UPDATE, GRPH_UPDATE_LOCK, 0); - WREG32(mmGRPH_UPDATE + crtc_offsets[i], tmp); - } - tmp = RREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK)) { - tmp = REG_SET_FIELD(tmp, MASTER_UPDATE_LOCK, MASTER_UPDATE_LOCK, 0); - WREG32(mmMASTER_UPDATE_LOCK + crtc_offsets[i], tmp); - } - for (j = 0; j < adev->usec_timeout; j++) { - tmp = RREG32(mmGRPH_UPDATE + crtc_offsets[i]); - if (REG_GET_FIELD(tmp, GRPH_UPDATE, GRPH_SURFACE_UPDATE_PENDING) == 0) - break; - udelay(1); - } tmp = RREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i]); tmp = REG_SET_FIELD(tmp, CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 0); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 1); WREG32(mmCRTC_BLANK_CONTROL + crtc_offsets[i], tmp); - WREG32(mmCRTC_UPDATE_LOCK + crtc_offsets[i], 0); - /* wait for the next frame */ - frame_count = amdgpu_display_vblank_get_counter(adev, i); - for (j = 0; j < adev->usec_timeout; j++) { - if (amdgpu_display_vblank_get_counter(adev, i) != frame_count) - break; - udelay(1); - } } + mdelay(20); } WREG32(mmVGA_MEMORY_BASE_ADDRESS_HIGH, upper_32_bits(adev->mc.vram_start)); diff --git a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c index 245cabf06575..ed03b75175d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/fiji_dpm.c @@ -72,6 +72,11 @@ static int fiji_dpm_sw_init(void *handle) static int fiji_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index 7f18a53ab53a..f6bd9465dbdc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -991,6 +991,22 @@ out: return err; } +static void gfx_v7_0_free_microcode(struct amdgpu_device *adev) +{ + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.ce_fw); + adev->gfx.ce_fw = NULL; + release_firmware(adev->gfx.mec_fw); + adev->gfx.mec_fw = NULL; + release_firmware(adev->gfx.mec2_fw); + adev->gfx.mec2_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; +} + /** * gfx_v7_0_tiling_mode_table_init - init the hw tiling table * @@ -1567,9 +1583,15 @@ static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev) * registers are instanced per SE or SH. 0xffffffff means * broadcast to all SEs or SHs (CIK). 
*/ -void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) +static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, + u32 se_num, u32 sh_num, u32 instance) { - u32 data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK; + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK | @@ -1643,13 +1665,13 @@ static void gfx_v7_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); data = gfx_v7_0_get_rb_active_bitmap(adev); active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -1730,7 +1752,7 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the shaders */ - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ @@ -2034,17 +2056,6 @@ static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - if (ctx_switch) - next_rptr += 2; - - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); /* insert SWITCH_BUFFER packet before first IB in the ring frame */ if (ctx_switch) { @@ -2073,22 +2084,9 @@ static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, unsigned vm_id, bool ctx_switch) { - u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - control |= INDIRECT_BUFFER_VALID; - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); - - header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); - - control |= ib->length_dw | (vm_id << 24); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vm_id << 24); - amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 0) | @@ -3205,7 +3203,8 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev) } } adev->gfx.rlc.cs_data = ci_cs_data; - adev->gfx.rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4; + adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */ + adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */ src_ptr = adev->gfx.rlc.reg_list; dws = adev->gfx.rlc.reg_list_size; @@ -3363,7 +3362,7 @@ static void 
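[Editor's note: the new instance argument to gfx_v7_0_select_se_sh above is encoded into GRBM_GFX_INDEX the same way as the SE/SH numbers: 0xffffffff selects the broadcast bit, anything else lands in the index field. A sketch of that encoding; the field positions below are illustrative placeholders, not the real register layout:

#include <stdio.h>
#include <stdint.h>

/* placeholder layout, not the actual GRBM_GFX_INDEX bit positions */
#define INSTANCE_INDEX_MASK    0x000000ffu
#define INSTANCE_BROADCAST_BIT (1u << 30)

static uint32_t gfx_index(uint32_t instance)
{
        if (instance == 0xffffffff)     /* magic value = broadcast */
                return INSTANCE_BROADCAST_BIT;
        return instance & INSTANCE_INDEX_MASK;
}

int main(void)
{
        printf("instance 3: 0x%08x\n", (unsigned)gfx_index(3));
        printf("broadcast:  0x%08x\n", (unsigned)gfx_index(0xffffffff));
        return 0;
}

End of note.]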
gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v7_0_select_se_sh(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3371,7 +3370,7 @@ static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3418,7 +3417,7 @@ static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev) return orig; } -void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) +static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) { u32 tmp, i, mask; @@ -3440,7 +3439,7 @@ void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev) } } -void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) +static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) { u32 tmp; @@ -3455,7 +3454,7 @@ void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev) * * Halt the RLC ME (MicroEngine) (CIK). */ -void gfx_v7_0_rlc_stop(struct amdgpu_device *adev) +static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev) { WREG32(mmRLC_CNTL, 0); @@ -3531,7 +3530,7 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev) WREG32(mmRLC_LB_CNTR_MAX, 0x00008000); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff); WREG32(mmRLC_LB_PARAMS, 0x00600408); WREG32(mmRLC_LB_CNTL, 0x80000004); @@ -3571,7 +3570,7 @@ static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3622,7 +3621,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | @@ -3673,7 +3672,7 @@ static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable) tmp = gfx_v7_0_halt_rlc(adev); mutex_lock(&adev->grbm_idx_mutex); - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK; @@ -3851,6 +3850,20 @@ static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev, } } +static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, + u32 bitmap) +{ + u32 data; + + if (!bitmap) + return; + + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + + 
WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data); +} + static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; @@ -4107,7 +4120,7 @@ static void gfx_v7_0_fini_pg(struct amdgpu_device *adev) * Fetches a GPU clock counter snapshot (SI). * Returns the 64 bit clock counter snapshot. */ -uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev) +static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; @@ -4167,12 +4180,24 @@ static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } +static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v7_0_select_se_sh, +}; + +static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = { + .enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode, + .exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode +}; + static int gfx_v7_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS; adev->gfx.num_compute_rings = GFX7_NUM_COMPUTE_RINGS; + adev->gfx.funcs = &gfx_v7_0_gfx_funcs; + adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs; gfx_v7_0_set_ring_funcs(adev); gfx_v7_0_set_irq_funcs(adev); gfx_v7_0_set_gds_init(adev); @@ -4489,6 +4514,7 @@ static int gfx_v7_0_sw_fini(void *handle) gfx_v7_0_cp_compute_fini(adev); gfx_v7_0_rlc_fini(adev); gfx_v7_0_mec_fini(adev); + gfx_v7_0_free_microcode(adev); return 0; } @@ -4816,7 +4842,7 @@ static int gfx_v7_0_eop_irq(struct amdgpu_device *adev, case 2: for (i = 0; i < adev->gfx.num_compute_rings; i++) { ring = &adev->gfx.compute_ring[i]; - if ((ring->me == me_id) & (ring->pipe == pipe_id)) + if ((ring->me == me_id) && (ring->pipe == pipe_id)) amdgpu_fence_process(ring); } break; @@ -5015,16 +5041,22 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + unsigned disable_masks[4 * 2]; memset(cu_info, 0, sizeof(*cu_info)); + amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; ao_bitmap = 0; counter = 0; - gfx_v7_0_select_se_sh(adev, i, j); + gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff); + if (i < 4 && j < 2) + gfx_v7_0_set_user_cu_inactive_bitmap( + adev, disable_masks[i * 2 + j]); bitmap = gfx_v7_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; @@ -5040,7 +5072,7 @@ static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev) ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); } } - gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h index e747aa935c88..94e3ea147c26 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.h @@ -26,11 +26,4 @@ extern const struct amd_ip_funcs gfx_v7_0_ip_funcs; -/* XXX these shouldn't be exported */ -void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev); -void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev); -void gfx_v7_0_rlc_stop(struct amdgpu_device *adev); -uint64_t gfx_v7_0_get_gpu_clock_counter(struct 
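[Editor's note: gfx_v7_0_get_cu_info above (continuing below) scans every SE/SH pair, applies the user-requested inactive mask, then counts the active CUs in the bitmap, folding the first two active CUs of each array into the always-on mask. The per-bitmap tally, extracted into standalone form:

#include <stdio.h>

/* count active CUs in a 16-bit bitmap; the first two active ones
 * contribute to the always-on (ao) mask, mirroring get_cu_info */
static int tally(unsigned bitmap, unsigned *ao)
{
        unsigned mask = 1;
        int k, counter = 0;

        *ao = 0;
        for (k = 0; k < 16; k++) {
                if (bitmap & mask) {
                        if (counter < 2)
                                *ao |= mask;
                        counter++;
                }
                mask <<= 1;
        }
        return counter;
}

int main(void)
{
        unsigned ao;
        int n = tally(0x00f6, &ao);

        printf("%d active CUs, ao mask 0x%04x\n", n, ao);
        return 0;
}

End of note.]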
amdgpu_device *adev); -void gfx_v7_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index f19bab68fd83..c30b6ac25d89 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -297,7 +297,8 @@ static const u32 polaris11_golden_common_all[] = static const u32 golden_settings_polaris10_a11[] = { mmATC_MISC_CG, 0x000c0fc0, 0x000c0200, - mmCB_HW_CONTROL, 0xfffdf3cf, 0x00006208, + mmCB_HW_CONTROL, 0xfffdf3cf, 0x00007208, + mmCB_HW_CONTROL_2, 0, 0x0f000000, mmCB_HW_CONTROL_3, 0x000001ff, 0x00000040, mmDB_DEBUG2, 0xf00fffff, 0x00000400, mmPA_SC_ENHANCE, 0xffffffff, 0x20000001, @@ -836,6 +837,26 @@ err1: return r; } + +static void gfx_v8_0_free_microcode(struct amdgpu_device *adev) { + release_firmware(adev->gfx.pfp_fw); + adev->gfx.pfp_fw = NULL; + release_firmware(adev->gfx.me_fw); + adev->gfx.me_fw = NULL; + release_firmware(adev->gfx.ce_fw); + adev->gfx.ce_fw = NULL; + release_firmware(adev->gfx.rlc_fw); + adev->gfx.rlc_fw = NULL; + release_firmware(adev->gfx.mec_fw); + adev->gfx.mec_fw = NULL; + if ((adev->asic_type != CHIP_STONEY) && + (adev->asic_type != CHIP_TOPAZ)) + release_firmware(adev->gfx.mec2_fw); + adev->gfx.mec2_fw = NULL; + + kfree(adev->gfx.rlc.register_list_format); +} + static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) { const char *chip_name; @@ -1129,6 +1150,71 @@ static void gfx_v8_0_get_csb_buffer(struct amdgpu_device *adev, buffer[count++] = cpu_to_le32(0); } +static void cz_init_cp_jump_table(struct amdgpu_device *adev) +{ + const __le32 *fw_data; + volatile u32 *dst_ptr; + int me, i, max_me = 4; + u32 bo_offset = 0; + u32 table_offset, table_size; + + if (adev->asic_type == CHIP_CARRIZO) + max_me = 5; + + /* write the cp table buffer */ + dst_ptr = adev->gfx.rlc.cp_table_ptr; + for (me = 0; me < max_me; me++) { + if (me == 0) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data; + fw_data = (const __le32 *) + (adev->gfx.ce_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 1) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data; + fw_data = (const __le32 *) + (adev->gfx.pfp_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 2) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; + fw_data = (const __le32 *) + (adev->gfx.me_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 3) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } else if (me == 4) { + const struct gfx_firmware_header_v1_0 *hdr = + (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; + fw_data = (const __le32 *) + (adev->gfx.mec2_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + table_offset = 
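[Editor's note: cz_init_cp_jump_table above concatenates each microengine's jump table into the shared cp_table buffer object, locating the table through the jt_offset/jt_size fields of its firmware header. A simplified sketch of one such copy; the header struct is a stand-in for gfx_firmware_header_v1_0 and the le32 conversions are dropped:

#include <stdint.h>
#include <stdio.h>

struct fw_header {                      /* stand-in for gfx_firmware_header_v1_0 */
        uint32_t ucode_offset_bytes;    /* start of the ucode payload */
        uint32_t jt_offset;             /* jump table start, in dwords */
        uint32_t jt_size;               /* jump table length, in dwords */
};

/* append one engine's jump table to dst; returns the next free slot */
static uint32_t copy_jt(uint32_t *dst, uint32_t bo_offset,
                        const uint8_t *fw, const struct fw_header *hdr)
{
        const uint32_t *data = (const uint32_t *)(fw + hdr->ucode_offset_bytes);
        uint32_t i;

        for (i = 0; i < hdr->jt_size; i++)
                dst[bo_offset + i] = data[hdr->jt_offset + i];
        return bo_offset + hdr->jt_size;
}

int main(void)
{
        uint32_t blob[8] = { 0 };               /* fake firmware image */
        struct fw_header hdr = { 8, 0, 3 };     /* fabricated offsets */
        uint32_t dst[8];

        blob[2] = 0xdeadbeef;                   /* first jt dword: byte 8, jt_offset 0 */
        copy_jt(dst, 0, (const uint8_t *)blob, &hdr);
        printf("dst[0] = 0x%08x\n", (unsigned)dst[0]);
        return 0;
}

End of note.]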
le32_to_cpu(hdr->jt_offset); + table_size = le32_to_cpu(hdr->jt_size); + } + + for (i = 0; i < table_size; i ++) { + dst_ptr[bo_offset + i] = + cpu_to_le32(le32_to_cpu(fw_data[table_offset + i])); + } + + bo_offset += table_size; + } +} + static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) { int r; @@ -1144,6 +1230,18 @@ static void gfx_v8_0_rlc_fini(struct amdgpu_device *adev) amdgpu_bo_unref(&adev->gfx.rlc.clear_state_obj); adev->gfx.rlc.clear_state_obj = NULL; } + + /* jump table block */ + if (adev->gfx.rlc.cp_table_obj) { + r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); + if (unlikely(r != 0)) + dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); + amdgpu_bo_unpin(adev->gfx.rlc.cp_table_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + + amdgpu_bo_unref(&adev->gfx.rlc.cp_table_obj); + adev->gfx.rlc.cp_table_obj = NULL; + } } static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) @@ -1200,6 +1298,46 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev) amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj); } + if ((adev->asic_type == CHIP_CARRIZO) || + (adev->asic_type == CHIP_STONEY)) { + adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */ + if (adev->gfx.rlc.cp_table_obj == NULL) { + r = amdgpu_bo_create(adev, adev->gfx.rlc.cp_table_size, PAGE_SIZE, true, + AMDGPU_GEM_DOMAIN_VRAM, + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, + NULL, NULL, + &adev->gfx.rlc.cp_table_obj); + if (r) { + dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r); + return r; + } + } + + r = amdgpu_bo_reserve(adev->gfx.rlc.cp_table_obj, false); + if (unlikely(r != 0)) { + dev_warn(adev->dev, "(%d) reserve RLC cp table bo failed\n", r); + return r; + } + r = amdgpu_bo_pin(adev->gfx.rlc.cp_table_obj, AMDGPU_GEM_DOMAIN_VRAM, + &adev->gfx.rlc.cp_table_gpu_addr); + if (r) { + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + dev_warn(adev->dev, "(%d) pin RLC cp_table bo failed\n", r); + return r; + } + r = amdgpu_bo_kmap(adev->gfx.rlc.cp_table_obj, (void **)&adev->gfx.rlc.cp_table_ptr); + if (r) { + dev_warn(adev->dev, "(%d) map RLC cp table bo failed\n", r); + return r; + } + + cz_init_cp_jump_table(adev); + + amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj); + amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj); + + } + return 0; } @@ -1983,7 +2121,7 @@ static int gfx_v8_0_sw_fini(void *handle) gfx_v8_0_rlc_fini(adev); - kfree(adev->gfx.rlc.register_list_format); + gfx_v8_0_free_microcode(adev); return 0; } @@ -3308,9 +3446,15 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev) } } -void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num) +static void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, + u32 se_num, u32 sh_num, u32 instance) { - u32 data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + u32 data; + + if (instance == 0xffffffff) + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); + else + data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance); if ((se_num == 0xffffffff) && (sh_num == 0xffffffff)) { data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SH_BROADCAST_WRITES, 1); @@ -3360,13 +3504,13 @@ static void gfx_v8_0_setup_rb(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); data = gfx_v8_0_get_rb_active_bitmap(adev); active_rbs |= 
data << ((i * adev->gfx.config.max_sh_per_se + j) * rb_bitmap_width_per_sh); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); adev->gfx.config.backend_enable_mask = active_rbs; @@ -3470,7 +3614,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev) * making sure that the following register writes will be broadcasted * to all the shaders */ - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmPA_SC_FIFO_SIZE, (adev->gfx.config.sc_prim_fifo_size_frontend << @@ -3493,7 +3637,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { - gfx_v8_0_select_se_sh(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); for (k = 0; k < adev->usec_timeout; k++) { if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0) break; @@ -3501,7 +3645,7 @@ static void gfx_v8_0_wait_for_rlc_serdes(struct amdgpu_device *adev) } } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK | @@ -3662,13 +3806,13 @@ static void gfx_v8_0_enable_save_restore_machine(struct amdgpu_device *adev) WREG32(mmRLC_SRM_CNTL, data); } -static void polaris11_init_power_gating(struct amdgpu_device *adev) +static void gfx_v8_0_init_power_gating(struct amdgpu_device *adev) { uint32_t data; if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | - AMD_PG_SUPPORT_GFX_SMG | - AMD_PG_SUPPORT_GFX_DMG)) { + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG)) { data = RREG32(mmCP_RB_WPTR_POLL_CNTL); data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK; data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT); @@ -3693,6 +3837,53 @@ static void polaris11_init_power_gating(struct amdgpu_device *adev) } } +static void cz_enable_sck_slow_down_on_power_up(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_sck_slow_down_on_power_down(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_cp_power_gating(struct amdgpu_device *adev, bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data &= ~RLC_PG_CNTL__CP_PG_DISABLE_MASK; + else + data |= RLC_PG_CNTL__CP_PG_DISABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + static void gfx_v8_0_init_pg(struct amdgpu_device *adev) { if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | @@ -3705,8 +3896,25 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev) gfx_v8_0_init_save_restore_list(adev); gfx_v8_0_enable_save_restore_machine(adev); - if (adev->asic_type == CHIP_POLARIS11) - polaris11_init_power_gating(adev); + if ((adev->asic_type == CHIP_CARRIZO) || + (adev->asic_type == CHIP_STONEY)) { + 
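[Editor's note: the cz_enable_* helpers introduced above all share one read-modify-write discipline: snapshot the register, set or clear a single mask according to the enable flag, and write back only when the value actually changed, so no redundant MMIO write is issued. Distilled:

#include <stdio.h>

static unsigned fake_reg;       /* stands in for RLC_PG_CNTL */

static void update_mask(unsigned mask, int enable)
{
        unsigned orig, data;

        orig = data = fake_reg;         /* RREG32 in the driver */

        if (enable)
                data |= mask;
        else
                data &= ~mask;

        if (orig != data)
                fake_reg = data;        /* WREG32 only on change */
}

int main(void)
{
        update_mask(0x1, 1);
        update_mask(0x1, 1);            /* second call writes nothing back */
        printf("reg = 0x%x\n", fake_reg);
        return 0;
}

End of note.]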
WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8); + gfx_v8_0_init_power_gating(adev); + WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); + if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { + cz_enable_sck_slow_down_on_power_up(adev, true); + cz_enable_sck_slow_down_on_power_down(adev, true); + } else { + cz_enable_sck_slow_down_on_power_up(adev, false); + cz_enable_sck_slow_down_on_power_down(adev, false); + } + if (adev->pg_flags & AMD_PG_SUPPORT_CP) + cz_enable_cp_power_gating(adev, true); + else + cz_enable_cp_power_gating(adev, false); + } else if (adev->asic_type == CHIP_POLARIS11) { + gfx_v8_0_init_power_gating(adev); + } } } @@ -3974,11 +4182,15 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev) amdgpu_ring_write(ring, 0x3a00161a); amdgpu_ring_write(ring, 0x0000002e); break; - case CHIP_TOPAZ: case CHIP_CARRIZO: amdgpu_ring_write(ring, 0x00000002); amdgpu_ring_write(ring, 0x00000000); break; + case CHIP_TOPAZ: + amdgpu_ring_write(ring, adev->gfx.config.num_rbs == 1 ? + 0x00000000 : 0x00000002); + amdgpu_ring_write(ring, 0x00000000); + break; case CHIP_STONEY: amdgpu_ring_write(ring, 0x00000000); amdgpu_ring_write(ring, 0x00000000); @@ -4941,7 +5153,7 @@ static int gfx_v8_0_soft_reset(void *handle) * Fetches a GPU clock counter snapshot. * Returns the 64 bit clock counter snapshot. */ -uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev) +static uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev) { uint64_t clock; @@ -5001,12 +5213,18 @@ static void gfx_v8_0_ring_emit_gds_switch(struct amdgpu_ring *ring, amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base)); } +static const struct amdgpu_gfx_funcs gfx_v8_0_gfx_funcs = { + .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, + .select_se_sh = &gfx_v8_0_select_se_sh, +}; + static int gfx_v8_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS; adev->gfx.num_compute_rings = GFX8_NUM_COMPUTE_RINGS; + adev->gfx.funcs = &gfx_v8_0_gfx_funcs; gfx_v8_0_set_ring_funcs(adev); gfx_v8_0_set_irq_funcs(adev); gfx_v8_0_set_gds_init(adev); @@ -5039,51 +5257,43 @@ static int gfx_v8_0_late_init(void *handle) return 0; } -static void polaris11_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, - bool enable) +static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev, + bool enable) { uint32_t data, temp; - /* Send msg to SMU via Powerplay */ - amdgpu_set_powergating_state(adev, - AMD_IP_BLOCK_TYPE_SMC, - enable ? AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); + if (adev->asic_type == CHIP_POLARIS11) + /* Send msg to SMU via Powerplay */ + amdgpu_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_SMC, + enable ? 
+ AMD_PG_STATE_GATE : AMD_PG_STATE_UNGATE); - if (enable) { - /* Enable static MGPG */ - temp = data = RREG32(mmRLC_PG_CNTL); + temp = data = RREG32(mmRLC_PG_CNTL); + /* Enable static MGPG */ + if (enable) data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } else { - temp = data = RREG32(mmRLC_PG_CNTL); + else data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK; - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } + if (temp != data) + WREG32(mmRLC_PG_CNTL, data); } -static void polaris11_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, - bool enable) +static void gfx_v8_0_enable_gfx_dynamic_mg_power_gating(struct amdgpu_device *adev, + bool enable) { uint32_t data, temp; - if (enable) { - /* Enable dynamic MGPG */ - temp = data = RREG32(mmRLC_PG_CNTL); + temp = data = RREG32(mmRLC_PG_CNTL); + /* Enable dynamic MGPG */ + if (enable) data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } else { - temp = data = RREG32(mmRLC_PG_CNTL); + else data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK; - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } + if (temp != data) + WREG32(mmRLC_PG_CNTL, data); } static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *adev, @@ -5091,19 +5301,63 @@ static void polaris11_enable_gfx_quick_mg_power_gating(struct amdgpu_device *ade { uint32_t data, temp; - if (enable) { - /* Enable quick PG */ - temp = data = RREG32(mmRLC_PG_CNTL); - data |= 0x100000; + temp = data = RREG32(mmRLC_PG_CNTL); + /* Enable quick PG */ + if (enable) + data |= RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__QUICK_PG_ENABLE_MASK; - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); - } else { - temp = data = RREG32(mmRLC_PG_CNTL); - data &= ~0x100000; + if (temp != data) + WREG32(mmRLC_PG_CNTL, data); +} - if (temp != data) - WREG32(mmRLC_PG_CNTL, data); +static void cz_enable_gfx_cg_power_gating(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); +} + +static void cz_enable_gfx_pipeline_power_gating(struct amdgpu_device *adev, + bool enable) +{ + u32 data, orig; + + orig = data = RREG32(mmRLC_PG_CNTL); + + if (enable) + data |= RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; + else + data &= ~RLC_PG_CNTL__GFX_PIPELINE_PG_ENABLE_MASK; + + if (orig != data) + WREG32(mmRLC_PG_CNTL, data); + + /* Read any GFX register to wake up GFX. */ + if (!enable) + data = RREG32(mmDB_RENDER_CONTROL); +} + +static void cz_update_gfx_cg_power_gating(struct amdgpu_device *adev, + bool enable) +{ + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) && enable) { + cz_enable_gfx_cg_power_gating(adev, true); + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PIPELINE) + cz_enable_gfx_pipeline_power_gating(adev, true); + } else { + cz_enable_gfx_cg_power_gating(adev, false); + cz_enable_gfx_pipeline_power_gating(adev, false); } } @@ -5111,21 +5365,42 @@ static int gfx_v8_0_set_powergating_state(void *handle, enum amd_powergating_state state) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + bool enable = (state == AMD_PG_STATE_GATE) ? 
true : false; if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) return 0; switch (adev->asic_type) { + case CHIP_CARRIZO: + case CHIP_STONEY: + if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) + cz_update_gfx_cg_power_gating(adev, enable); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable) + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false); + break; case CHIP_POLARIS11: - if (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) - polaris11_enable_gfx_static_mg_power_gating(adev, - state == AMD_PG_STATE_GATE ? true : false); - else if (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) - polaris11_enable_gfx_dynamic_mg_power_gating(adev, - state == AMD_PG_STATE_GATE ? true : false); + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_static_mg_power_gating(adev, false); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG) && enable) + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, true); + else + gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false); + + if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_QUICK_MG) && enable) + polaris11_enable_gfx_quick_mg_power_gating(adev, true); else - polaris11_enable_gfx_quick_mg_power_gating(adev, - state == AMD_PG_STATE_GATE ? true : false); + polaris11_enable_gfx_quick_mg_power_gating(adev, false); break; default: break; @@ -5139,7 +5414,7 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, { uint32_t data; - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff); WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff); @@ -5527,6 +5802,8 @@ static void gfx_v8_0_update_coarse_grain_clock_gating(struct amdgpu_device *adev WREG32(mmRLC_CGCG_CGLS_CTRL, data); } + gfx_v8_0_wait_for_rlc_serdes(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } static int gfx_v8_0_update_gfx_clock_gating(struct amdgpu_device *adev, @@ -5652,17 +5929,6 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - if (ctx_switch) - next_rptr += 2; - - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); /* insert SWITCH_BUFFER packet before first IB in the ring frame */ if (ctx_switch) { @@ -5691,23 +5957,9 @@ static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring, struct amdgpu_ib *ib, unsigned vm_id, bool ctx_switch) { - u32 header, control = 0; - u32 next_rptr = ring->wptr + 5; - - control |= INDIRECT_BUFFER_VALID; - - next_rptr += 4; - amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); - amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM); - amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff); - amdgpu_ring_write(ring, next_rptr); - - header = PACKET3(PACKET3_INDIRECT_BUFFER, 2); + u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | 
(vm_id << 24); - control |= ib->length_dw | (vm_id << 24); - - amdgpu_ring_write(ring, header); + amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); amdgpu_ring_write(ring, #ifdef __BIG_ENDIAN (2 << 0) | @@ -6160,9 +6412,9 @@ static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev) { switch (adev->asic_type) { case CHIP_TOPAZ: - case CHIP_STONEY: adev->gfx.rlc.funcs = &iceland_rlc_funcs; break; + case CHIP_STONEY: case CHIP_CARRIZO: adev->gfx.rlc.funcs = &cz_rlc_funcs; break; @@ -6200,6 +6452,20 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev) } } +static void gfx_v8_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev, + u32 bitmap) +{ + u32 data; + + if (!bitmap) + return; + + data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT; + data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK; + + WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data); +} + static u32 gfx_v8_0_get_cu_active_bitmap(struct amdgpu_device *adev) { u32 data, mask; @@ -6220,16 +6486,22 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) int i, j, k, counter, active_cu_number = 0; u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0; struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info; + unsigned disable_masks[4 * 2]; memset(cu_info, 0, sizeof(*cu_info)); + amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2); + mutex_lock(&adev->grbm_idx_mutex); for (i = 0; i < adev->gfx.config.max_shader_engines; i++) { for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) { mask = 1; ao_bitmap = 0; counter = 0; - gfx_v8_0_select_se_sh(adev, i, j); + gfx_v8_0_select_se_sh(adev, i, j, 0xffffffff); + if (i < 4 && j < 2) + gfx_v8_0_set_user_cu_inactive_bitmap( + adev, disable_masks[i * 2 + j]); bitmap = gfx_v8_0_get_cu_active_bitmap(adev); cu_info->bitmap[i][j] = bitmap; @@ -6245,7 +6517,7 @@ static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev) ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8)); } } - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); cu_info->number = active_cu_number; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h index 16a49f53a2fa..bc82c794312c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.h @@ -26,7 +26,6 @@ extern const struct amd_ip_funcs gfx_v8_0_ip_funcs; -uint64_t gfx_v8_0_get_gpu_clock_counter(struct amdgpu_device *adev); void gfx_v8_0_select_se_sh(struct amdgpu_device *adev, u32 se_num, u32 sh_num); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 1feb6439cb0b..d24a82bd0c7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -39,6 +39,7 @@ static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev); +static int gmc_v7_0_wait_for_idle(void *handle); MODULE_FIRMWARE("radeon/bonaire_mc.bin"); MODULE_FIRMWARE("radeon/hawaii_mc.bin"); @@ -73,39 +74,15 @@ static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev) } } -/** - * gmc7_mc_wait_for_idle - wait for MC idle callback. - * - * @adev: amdgpu_device pointer - * - * Wait for the MC (memory controller) to be idle. - * (evergreen+). - * Returns 0 if the MC is idle, -1 if not. 
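[Editor's note: both ring_emit_ib_compute rewrites above collapse the packet emission to a single control dword combining the valid flag, the IB length in dwords, and the VMID shifted into bits 24 and up. A sketch of the packing; the valid-bit position here is illustrative only:

#include <stdint.h>
#include <stdio.h>

#define IB_VALID (1u << 23)     /* illustrative bit position */

static uint32_t ib_control(uint32_t length_dw, uint32_t vm_id)
{
        /* length occupies the low bits; the VMID field must not collide with it */
        return IB_VALID | (length_dw & 0xfffff) | ((vm_id & 0xf) << 24);
}

int main(void)
{
        printf("control = 0x%08x\n", (unsigned)ib_control(256, 3));
        return 0;
}

End of note.]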
- */ -int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev) -{ - unsigned i; - u32 tmp; - - for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmSRBM_STATUS) & 0x1F00; - if (!tmp) - return 0; - udelay(1); - } - return -1; -} - -void gmc_v7_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 blackout; if (adev->mode_info.num_crtc) amdgpu_display_stop_mc_access(adev, save); - amdgpu_asic_wait_for_mc_idle(adev); + gmc_v7_0_wait_for_idle((void *)adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -120,8 +97,8 @@ void gmc_v7_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -void gmc_v7_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v7_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 tmp; @@ -311,7 +288,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) amdgpu_display_set_vga_render_state(adev, false); gmc_v7_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } /* Update configuration */ @@ -331,7 +308,7 @@ static void gmc_v7_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } gmc_v7_0_mc_resume(adev, &save); @@ -1137,7 +1114,7 @@ static int gmc_v7_0_soft_reset(void *handle) if (srbm_soft_reset) { gmc_v7_0_mc_stop(adev, &save); - if (gmc_v7_0_wait_for_idle(adev)) { + if (gmc_v7_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h index 36fcbbc46ada..0b386b5d2f7a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.h @@ -26,11 +26,4 @@ extern const struct amd_ip_funcs gmc_v7_0_ip_funcs; -/* XXX these shouldn't be exported */ -void gmc_v7_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -void gmc_v7_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -int gmc_v7_0_mc_wait_for_idle(struct amdgpu_device *adev); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 9945d5bbf1fe..717359d3ba8c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -41,6 +41,7 @@ static void gmc_v8_0_set_gart_funcs(struct amdgpu_device *adev); static void gmc_v8_0_set_irq_funcs(struct amdgpu_device *adev); +static int gmc_v8_0_wait_for_idle(void *handle); MODULE_FIRMWARE("amdgpu/tonga_mc.bin"); MODULE_FIRMWARE("amdgpu/polaris11_mc.bin"); @@ -147,44 +148,15 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev) } } -/** - * gmc8_mc_wait_for_idle - wait for MC idle callback. - * - * @adev: amdgpu_device pointer - * - * Wait for the MC (memory controller) to be idle. - * (evergreen+). - * Returns 0 if the MC is idle, -1 if not. 
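[Editor's note: the (void *)adev casts above follow from the gmc_v7_0 conversion: the exported, typed wait-for-idle helper is gone, and callers now go through the IP-block callback, whose prototype takes an opaque handle as amd_ip_funcs does. The shape of that indirection, in toy form:

#include <stdio.h>

struct toy_device { int mc_busy; };     /* toy stand-in for amdgpu_device */

struct toy_ip_funcs {
        int (*wait_for_idle)(void *handle);     /* opaque, like amd_ip_funcs */
};

static int toy_wait_for_idle(void *handle)
{
        struct toy_device *dev = handle;

        return dev->mc_busy ? -1 : 0;
}

static const struct toy_ip_funcs funcs = { toy_wait_for_idle };

int main(void)
{
        struct toy_device dev = { 0 };

        printf("idle: %d\n", funcs.wait_for_idle((void *)&dev));
        return 0;
}

End of note.]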
- */ -int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev) -{ - unsigned i; - u32 tmp; - - for (i = 0; i < adev->usec_timeout; i++) { - /* read MC_STATUS */ - tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__VMC_BUSY_MASK | - SRBM_STATUS__MCB_BUSY_MASK | - SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK | - SRBM_STATUS__MCC_BUSY_MASK | - SRBM_STATUS__MCD_BUSY_MASK | - SRBM_STATUS__VMC1_BUSY_MASK); - if (!tmp) - return 0; - udelay(1); - } - return -1; -} - -void gmc_v8_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_stop(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 blackout; if (adev->mode_info.num_crtc) amdgpu_display_stop_mc_access(adev, save); - amdgpu_asic_wait_for_mc_idle(adev); + gmc_v8_0_wait_for_idle(adev); blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL); if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) { @@ -199,8 +171,8 @@ void gmc_v8_0_mc_stop(struct amdgpu_device *adev, udelay(100); } -void gmc_v8_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save) +static void gmc_v8_0_mc_resume(struct amdgpu_device *adev, + struct amdgpu_mode_mc_save *save) { u32 tmp; @@ -393,7 +365,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) amdgpu_display_set_vga_render_state(adev, false); gmc_v8_0_mc_stop(adev, &save); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } /* Update configuration */ @@ -413,7 +385,7 @@ static void gmc_v8_0_mc_program(struct amdgpu_device *adev) WREG32(mmMC_VM_AGP_BASE, 0); WREG32(mmMC_VM_AGP_TOP, 0x0FFFFFFF); WREG32(mmMC_VM_AGP_BOT, 0x0FFFFFFF); - if (amdgpu_asic_wait_for_mc_idle(adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for MC idle timedout !\n"); } gmc_v8_0_mc_resume(adev, &save); @@ -1140,7 +1112,7 @@ static int gmc_v8_0_soft_reset(void *handle) if (srbm_soft_reset) { gmc_v8_0_mc_stop(adev, &save); - if (gmc_v8_0_wait_for_idle(adev)) { + if (gmc_v8_0_wait_for_idle((void *)adev)) { dev_warn(adev->dev, "Wait for GMC idle timed out !\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h index 973436086b38..fc5001a8119d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.h @@ -26,11 +26,4 @@ extern const struct amd_ip_funcs gmc_v8_0_ip_funcs; -/* XXX these shouldn't be exported */ -void gmc_v8_0_mc_stop(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -void gmc_v8_0_mc_resume(struct amdgpu_device *adev, - struct amdgpu_mode_mc_save *save); -int gmc_v8_0_mc_wait_for_idle(struct amdgpu_device *adev); - #endif diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c index 460bc8ad37e6..825ccd63f2dc 100644 --- a/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/iceland_dpm.c @@ -72,6 +72,11 @@ static int iceland_dpm_sw_init(void *handle) static int iceland_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c index a789a863d677..5a0e245771ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c @@ -507,19 +507,19 @@ static int kv_enable_didt(struct amdgpu_device *adev, bool enable) pi->caps_db_ramping || pi->caps_td_ramping || 
pi->caps_tcp_ramping) { - gfx_v7_0_enter_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->enter_safe_mode(adev); if (enable) { ret = kv_program_pt_config_registers(adev, didt_config_kv); if (ret) { - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); return ret; } } kv_do_enable_didt(adev, enable); - gfx_v7_0_exit_rlc_safe_mode(adev); + adev->gfx.rlc.funcs->exit_safe_mode(adev); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c index f4c3130d3fdb..ac3730a6e49f 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c @@ -105,6 +105,15 @@ static void sdma_v2_4_init_golden_registers(struct amdgpu_device *adev) } } +static void sdma_v2_4_free_microcode(struct amdgpu_device *adev) +{ + int i; + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + adev->sdma.instance[i].fw = NULL; + } +} + /** * sdma_v2_4_init_microcode - load ucode images from disk * @@ -246,19 +255,6 @@ static void sdma_v2_4_ring_emit_ib(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 vmid = vm_id & 0xf; - u32 next_rptr = ring->wptr + 5; - - while ((next_rptr & 7) != 2) - next_rptr++; - - next_rptr += 6; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); - amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); - amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); - amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ sdma_v2_4_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); @@ -461,6 +457,8 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], @@ -489,7 +487,11 @@ static int sdma_v2_4_gfx_resume(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; + } + sdma_v2_4_enable(adev, true); + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; @@ -580,8 +582,8 @@ static int sdma_v2_4_start(struct amdgpu_device *adev) return -EINVAL; } - /* unhalt the MEs */ - sdma_v2_4_enable(adev, true); + /* halt the engine before programming */ + sdma_v2_4_enable(adev, false); /* start the gfx rings and rlc compute queues */ r = sdma_v2_4_gfx_resume(adev); @@ -1012,6 +1014,7 @@ static int sdma_v2_4_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + sdma_v2_4_free_microcode(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c index 31d99b0010f7..f00db6f4c04c 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c @@ -236,6 +236,15 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev) } } +static void sdma_v3_0_free_microcode(struct amdgpu_device *adev) +{ + int i; + for (i = 0; i < adev->sdma.num_instances; i++) { + release_firmware(adev->sdma.instance[i].fw); + 
adev->sdma.instance[i].fw = NULL; + } +} + /** * sdma_v3_0_init_microcode - load ucode images from disk * @@ -406,18 +415,6 @@ static void sdma_v3_0_ring_emit_ib(struct amdgpu_ring *ring, unsigned vm_id, bool ctx_switch) { u32 vmid = vm_id & 0xf; - u32 next_rptr = ring->wptr + 5; - - while ((next_rptr & 7) != 2) - next_rptr++; - next_rptr += 6; - - amdgpu_ring_write(ring, SDMA_PKT_HEADER_OP(SDMA_OP_WRITE) | - SDMA_PKT_HEADER_SUB_OP(SDMA_SUBOP_WRITE_LINEAR)); - amdgpu_ring_write(ring, lower_32_bits(ring->next_rptr_gpu_addr) & 0xfffffffc); - amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr)); - amdgpu_ring_write(ring, SDMA_PKT_WRITE_UNTILED_DW_3_COUNT(1)); - amdgpu_ring_write(ring, next_rptr); /* IB packet must end on a 8 DW boundary */ sdma_v3_0_ring_insert_nop(ring, (10 - (ring->wptr & 7)) % 8); @@ -672,6 +669,8 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) /* Initialize the ring buffer's read and write pointers */ WREG32(mmSDMA0_GFX_RB_RPTR + sdma_offsets[i], 0); WREG32(mmSDMA0_GFX_RB_WPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_RPTR + sdma_offsets[i], 0); + WREG32(mmSDMA0_GFX_IB_OFFSET + sdma_offsets[i], 0); /* set the wb address whether it's enabled or not */ WREG32(mmSDMA0_GFX_RB_RPTR_ADDR_HI + sdma_offsets[i], @@ -711,7 +710,15 @@ static int sdma_v3_0_gfx_resume(struct amdgpu_device *adev) WREG32(mmSDMA0_GFX_IB_CNTL + sdma_offsets[i], ib_cntl); ring->ready = true; + } + /* unhalt the MEs */ + sdma_v3_0_enable(adev, true); + /* enable sdma ring preemption */ + sdma_v3_0_ctx_switch_enable(adev, true); + + for (i = 0; i < adev->sdma.num_instances; i++) { + ring = &adev->sdma.instance[i].ring; r = amdgpu_ring_test_ring(ring); if (r) { ring->ready = false; @@ -804,10 +811,9 @@ static int sdma_v3_0_start(struct amdgpu_device *adev) } } - /* unhalt the MEs */ - sdma_v3_0_enable(adev, true); - /* enable sdma ring preemption */ - sdma_v3_0_ctx_switch_enable(adev, true); + /* disable sdma engine before programming it */ + sdma_v3_0_ctx_switch_enable(adev, false); + sdma_v3_0_enable(adev, false); /* start the gfx rings and rlc compute queues */ r = sdma_v3_0_gfx_resume(adev); @@ -1247,6 +1253,7 @@ static int sdma_v3_0_sw_fini(void *handle) for (i = 0; i < adev->sdma.num_instances; i++) amdgpu_ring_fini(&adev->sdma.instance[i].ring); + sdma_v3_0_free_microcode(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c index b7615cefcac4..f06f6f4dc3a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c +++ b/drivers/gpu/drm/amd/amdgpu/tonga_dpm.c @@ -71,6 +71,11 @@ static int tonga_dpm_sw_fini(void *handle) { + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c index f07551476a70..416c8567d3ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c @@ -34,6 +34,8 @@ #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "bif/bif_4_1_d.h" + static void uvd_v4_2_mc_resume(struct amdgpu_device *adev); static void uvd_v4_2_init_cg(struct amdgpu_device *adev); static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev); @@ -439,6 +441,32 @@ static void uvd_v4_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** + * uvd_v4_2_ring_emit_hdp_flush - emit an hdp flush + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp flush. 
+ */ +static void uvd_v4_2_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + amdgpu_ring_write(ring, 0); +} + +/** + * uvd_v4_2_ring_emit_hdp_invalidate - emit an hdp invalidate + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp invalidate. + */ +static void uvd_v4_2_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); + amdgpu_ring_write(ring, 1); +} + +/** * uvd_v4_2_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -763,6 +791,8 @@ static const struct amdgpu_ring_funcs uvd_v4_2_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v4_2_ring_emit_ib, .emit_fence = uvd_v4_2_ring_emit_fence, + .emit_hdp_flush = uvd_v4_2_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v4_2_ring_emit_hdp_invalidate, .test_ring = uvd_v4_2_ring_test_ring, .test_ib = uvd_v4_2_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c index e0a76a883d46..dd636c4c4b08 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c @@ -31,6 +31,7 @@ #include "uvd/uvd_5_0_sh_mask.h" #include "oss/oss_2_0_d.h" #include "oss/oss_2_0_sh_mask.h" +#include "bif/bif_5_0_d.h" #include "vi.h" static void uvd_v5_0_set_ring_funcs(struct amdgpu_device *adev); @@ -489,6 +490,32 @@ static void uvd_v5_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** + * uvd_v5_0_ring_emit_hdp_flush - emit an hdp flush + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp flush. + */ +static void uvd_v5_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + amdgpu_ring_write(ring, 0); +} + +/** + * uvd_v5_0_ring_emit_hdp_invalidate - emit an hdp invalidate + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp invalidate. 
+ */ +static void uvd_v5_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); + amdgpu_ring_write(ring, 1); +} + +/** * uvd_v5_0_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -815,6 +842,8 @@ static const struct amdgpu_ring_funcs uvd_v5_0_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v5_0_ring_emit_ib, .emit_fence = uvd_v5_0_ring_emit_fence, + .emit_hdp_flush = uvd_v5_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v5_0_ring_emit_hdp_invalidate, .test_ring = uvd_v5_0_ring_test_ring, .test_ib = uvd_v5_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index c9929d665c01..07e9a987fbee 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -33,6 +33,7 @@ #include "oss/oss_2_0_sh_mask.h" #include "smu/smu_7_1_3_d.h" #include "smu/smu_7_1_3_sh_mask.h" +#include "bif/bif_5_1_d.h" #include "vi.h" static void uvd_v6_0_set_ring_funcs(struct amdgpu_device *adev); @@ -385,8 +386,8 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) uint32_t mp_swap_cntl; int i, j, r; - /*disable DPG */ - WREG32_P(mmUVD_POWER_STATUS, 0, ~(1 << 2)); + /* disable DPG */ + WREG32_P(mmUVD_POWER_STATUS, 0, ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); /* disable byte swapping */ lmi_swap_cntl = 0; @@ -405,17 +406,21 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) } /* disable interupt */ - WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1)); + WREG32_P(mmUVD_MASTINT_EN, 0, ~UVD_MASTINT_EN__VCPU_EN_MASK); /* stall UMC and register bus before resetting VCPU */ - WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + WREG32_P(mmUVD_LMI_CTRL2, UVD_LMI_CTRL2__STALL_ARB_UMC_MASK, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); mdelay(1); /* put LMI, VCPU, RBC etc... 
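/*
 * Illustrative sketch (not part of the patch): every new
 * emit_hdp_flush/emit_hdp_invalidate helper above is the same
 * two-dword "write one register" packet -- a PACKET0 header naming
 * the register, then the value (0 for the coherency flush, 1 for
 * HDP_DEBUG0).  The header layout in PKT0() is an assumption for
 * illustration; the driver's real PACKET0() macro is defined in its
 * own headers.
 */
#include <stdint.h>

#define PKT0(reg, n)	((0u << 30) | ((reg) & 0xffff) | ((uint32_t)(n) << 16))

struct ring { uint32_t *buf; unsigned int wptr, mask; };	/* mask = size - 1 */

static void ring_write(struct ring *r, uint32_t v)
{
	r->buf[r->wptr++ & r->mask] = v;
}

static void emit_reg_write(struct ring *r, uint32_t reg, uint32_t val)
{
	ring_write(r, PKT0(reg, 0));	/* count 0: one data dword follows */
	ring_write(r, val);
}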
into reset */ - WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | - UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | - UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | - UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | + WREG32(mmUVD_SOFT_RESET, + UVD_SOFT_RESET__LMI_SOFT_RESET_MASK | + UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK | + UVD_SOFT_RESET__LBSI_SOFT_RESET_MASK | + UVD_SOFT_RESET__RBC_SOFT_RESET_MASK | + UVD_SOFT_RESET__CSM_SOFT_RESET_MASK | + UVD_SOFT_RESET__CXW_SOFT_RESET_MASK | + UVD_SOFT_RESET__TAP_SOFT_RESET_MASK | UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK); mdelay(5); @@ -424,8 +429,13 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) mdelay(5); /* initialize UVD memory controller */ - WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) | - (1 << 21) | (1 << 9) | (1 << 20)); + WREG32(mmUVD_LMI_CTRL, + (0x40 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__DISABLE_ON_FWV_FAIL_MASK); #ifdef __BIG_ENDIAN /* swap (8 in 32) RB and IB */ @@ -447,10 +457,10 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) mdelay(5); /* enable VCPU clock */ - WREG32(mmUVD_VCPU_CNTL, 1 << 9); + WREG32(mmUVD_VCPU_CNTL, UVD_VCPU_CNTL__CLK_EN_MASK); /* enable UMC */ - WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8)); + WREG32_P(mmUVD_LMI_CTRL2, 0, ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); /* boot up the VCPU */ WREG32(mmUVD_SOFT_RESET, 0); @@ -484,10 +494,12 @@ static int uvd_v6_0_start(struct amdgpu_device *adev) return r; } /* enable master interrupt */ - WREG32_P(mmUVD_MASTINT_EN, 3 << 1, ~(3 << 1)); + WREG32_P(mmUVD_MASTINT_EN, + (UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK), + ~(UVD_MASTINT_EN__VCPU_EN_MASK|UVD_MASTINT_EN__SYS_EN_MASK)); /* clear the bit 4 of UVD_STATUS */ - WREG32_P(mmUVD_STATUS, 0, ~(2 << 1)); + WREG32_P(mmUVD_STATUS, 0, ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); rb_bufsz = order_base_2(ring->ring_size); tmp = 0; @@ -581,6 +593,32 @@ static void uvd_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq } /** + * uvd_v6_0_ring_emit_hdp_flush - emit an hdp flush + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp flush. + */ +static void uvd_v6_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_MEM_COHERENCY_FLUSH_CNTL, 0)); + amdgpu_ring_write(ring, 0); +} + +/** + * uvd_v6_0_ring_emit_hdp_invalidate - emit an hdp invalidate + * + * @ring: amdgpu_ring pointer + * + * Emits an hdp invalidate. + */ +static void uvd_v6_0_ring_emit_hdp_invalidate(struct amdgpu_ring *ring) +{ + amdgpu_ring_write(ring, PACKET0(mmHDP_DEBUG0, 0)); + amdgpu_ring_write(ring, 1); +} + +/** * uvd_v6_0_ring_test_ring - register write test * * @ring: amdgpu_ring pointer @@ -847,7 +885,8 @@ static int uvd_v6_0_set_clockgating_state(void *handle, bool enable = (state == AMD_CG_STATE_GATE) ? 
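/*
 * Illustrative sketch (not part of the patch): the uvd_v6_0_start()
 * conversions above swap magic shifts for the named *_MASK macros.
 * Each WREG32_P(reg, v, ~FIELD_MASK) call is a read-modify-write
 * whose third argument is the set of bits to preserve; a simplified
 * model (the in-tree macro may differ in detail):
 */
#include <stdint.h>

extern uint32_t reg_read(uint32_t reg);
extern void reg_write(uint32_t reg, uint32_t v);

static void wreg_p(uint32_t reg, uint32_t v, uint32_t keep)
{
	uint32_t tmp = reg_read(reg);

	tmp &= keep;	/* clear the field being updated */
	tmp |= v;	/* set its new value */
	reg_write(reg, tmp);
}
/* e.g. wreg_p(reg, 0, ~VCPU_EN_MASK) clears just the VCPU_EN field. */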
true : false; static int curstate = -1; - if (adev->asic_type == CHIP_FIJI) + if (adev->asic_type == CHIP_FIJI || + adev->asic_type == CHIP_POLARIS10) uvd_v6_set_bypass_mode(adev, enable); if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) @@ -919,6 +958,8 @@ static const struct amdgpu_ring_funcs uvd_v6_0_ring_funcs = { .parse_cs = amdgpu_uvd_ring_parse_cs, .emit_ib = uvd_v6_0_ring_emit_ib, .emit_fence = uvd_v6_0_ring_emit_fence, + .emit_hdp_flush = uvd_v6_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = uvd_v6_0_ring_emit_hdp_invalidate, .test_ring = uvd_v6_0_ring_test_ring, .test_ib = uvd_v6_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index 2c88d0b66cf3..cda7def9dc2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -203,6 +203,29 @@ static void vi_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) spin_unlock_irqrestore(&adev->didt_idx_lock, flags); } +static u32 vi_gc_cac_rreg(struct amdgpu_device *adev, u32 reg) +{ + unsigned long flags; + u32 r; + + spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); + WREG32(mmGC_CAC_IND_INDEX, (reg)); + r = RREG32(mmGC_CAC_IND_DATA); + spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); + return r; +} + +static void vi_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) +{ + unsigned long flags; + + spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); + WREG32(mmGC_CAC_IND_INDEX, (reg)); + WREG32(mmGC_CAC_IND_DATA, (v)); + spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); +} + + static const u32 tonga_mgcg_cgcg_init[] = { mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100, @@ -421,6 +444,20 @@ static bool vi_read_bios_from_rom(struct amdgpu_device *adev, return true; } +static u32 vi_get_virtual_caps(struct amdgpu_device *adev) +{ + u32 caps = 0; + u32 reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER); + + if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, IOV_ENABLE)) + caps |= AMDGPU_VIRT_CAPS_SRIOV_EN; + + if (REG_GET_FIELD(reg, BIF_IOV_FUNC_IDENTIFIER, FUNC_IDENTIFIER)) + caps |= AMDGPU_VIRT_CAPS_IS_VF; + + return caps; +} + static const struct amdgpu_allowed_register_entry tonga_allowed_read_registers[] = { {mmGB_MACROTILE_MODE7, true}, }; @@ -519,12 +556,12 @@ static uint32_t vi_read_indexed_register(struct amdgpu_device *adev, u32 se_num, mutex_lock(&adev->grbm_idx_mutex); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v8_0_select_se_sh(adev, se_num, sh_num); + amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff); val = RREG32(reg_offset); if (se_num != 0xffffffff || sh_num != 0xffffffff) - gfx_v8_0_select_se_sh(adev, 0xffffffff, 0xffffffff); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff); mutex_unlock(&adev->grbm_idx_mutex); return val; } @@ -583,7 +620,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num, return -EINVAL; } -static void vi_gpu_pci_config_reset(struct amdgpu_device *adev) +static int vi_gpu_pci_config_reset(struct amdgpu_device *adev) { u32 i; @@ -598,11 +635,14 @@ static void vi_gpu_pci_config_reset(struct amdgpu_device *adev) /* wait for asic to come out of reset */ for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) - break; + if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) { + /* enable BM */ + pci_set_master(adev->pdev); + return 0; + } udelay(1); } - + return -EINVAL; } static void vi_set_bios_scratch_engine_hung(struct amdgpu_device *adev, bool hung) @@ -628,13 +668,15 @@ static void vi_set_bios_scratch_engine_hung(struct 
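/*
 * Illustrative sketch (not part of the patch): the new
 * vi_gc_cac_rreg()/vi_gc_cac_wreg() follow the driver's usual
 * indirect-register idiom -- under a lock, write the target offset to
 * the INDEX register, then access the DATA register, so the
 * index/data pair stays atomic against concurrent users.  Modeled
 * here with a pthread mutex standing in for the spinlock;
 * mmio_read()/mmio_write() are hypothetical.
 */
#include <stdint.h>
#include <pthread.h>

#define IND_INDEX	0x129a	/* mmGC_CAC_IND_INDEX */
#define IND_DATA	0x129b	/* mmGC_CAC_IND_DATA */

extern uint32_t mmio_read(uint32_t reg);
extern void mmio_write(uint32_t reg, uint32_t v);

static pthread_mutex_t ind_lock = PTHREAD_MUTEX_INITIALIZER;

static uint32_t ind_read(uint32_t reg)
{
	uint32_t r;

	pthread_mutex_lock(&ind_lock);
	mmio_write(IND_INDEX, reg);	/* select the internal register */
	r = mmio_read(IND_DATA);	/* then read it through DATA */
	pthread_mutex_unlock(&ind_lock);
	return r;
}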
amdgpu_device *adev, bool hun */ static int vi_asic_reset(struct amdgpu_device *adev) { + int r; + vi_set_bios_scratch_engine_hung(adev, true); - vi_gpu_pci_config_reset(adev); + r = vi_gpu_pci_config_reset(adev); vi_set_bios_scratch_engine_hung(adev, false); - return 0; + return r; } static int vi_set_uvd_clock(struct amdgpu_device *adev, u32 clock, @@ -1118,9 +1160,7 @@ static const struct amdgpu_asic_funcs vi_asic_funcs = .get_xclk = &vi_get_xclk, .set_uvd_clocks = &vi_set_uvd_clocks, .set_vce_clocks = &vi_set_vce_clocks, - /* these should be moved to their own ip modules */ - .get_gpu_clock_counter = &gfx_v8_0_get_gpu_clock_counter, - .wait_for_mc_idle = &gmc_v8_0_mc_wait_for_idle, + .get_virtual_caps = &vi_get_virtual_caps, }; static int vi_common_early_init(void *handle) @@ -1141,6 +1181,8 @@ static int vi_common_early_init(void *handle) adev->uvd_ctx_wreg = &vi_uvd_ctx_wreg; adev->didt_rreg = &vi_didt_rreg; adev->didt_wreg = &vi_didt_wreg; + adev->gc_cac_rreg = &vi_gc_cac_rreg; + adev->gc_cac_wreg = &vi_gc_cac_wreg; adev->asic_funcs = &vi_asic_funcs; @@ -1207,19 +1249,39 @@ static int vi_common_early_init(void *handle) AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; + /* rev0 hardware doesn't support PG */ adev->pg_flags = 0; + if (adev->rev_id != 0x00) + adev->pg_flags |= AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_RLC_SMU_HS | + AMD_PG_SUPPORT_GFX_PIPELINE; adev->external_rev_id = adev->rev_id + 0x1; break; case CHIP_STONEY: adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG | AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_RLC_LS | + AMD_CG_SUPPORT_GFX_CP_LS | + AMD_CG_SUPPORT_GFX_CGTS | + AMD_CG_SUPPORT_GFX_MGLS | + AMD_CG_SUPPORT_GFX_CGTS_LS | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_BIF_LS | AMD_CG_SUPPORT_HDP_MGCG | AMD_CG_SUPPORT_HDP_LS | AMD_CG_SUPPORT_SDMA_MGCG | AMD_CG_SUPPORT_SDMA_LS; - adev->pg_flags = 0; + adev->pg_flags = AMD_PG_SUPPORT_GFX_PG | + AMD_PG_SUPPORT_GFX_SMG | + AMD_PG_SUPPORT_GFX_DMG | + AMD_PG_SUPPORT_GFX_PIPELINE | + AMD_PG_SUPPORT_CP | + AMD_PG_SUPPORT_RLC_SMU_HS; adev->external_rev_id = adev->rev_id + 0x1; break; default: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index ac005796b71c..4f3849ac8c07 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -63,13 +63,12 @@ static struct kfd_process *create_process(const struct task_struct *thread); void kfd_process_create_wq(void) { if (!kfd_process_wq) - kfd_process_wq = create_workqueue("kfd_process_wq"); + kfd_process_wq = alloc_workqueue("kfd_process_wq", 0, 0); } void kfd_process_destroy_wq(void) { if (kfd_process_wq) { - flush_workqueue(kfd_process_wq); destroy_workqueue(kfd_process_wq); kfd_process_wq = NULL; } @@ -242,13 +241,19 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, pqm_uninit(&p->pqm); /* Iterate over all process device data structure and check - * if we should reset all wavefronts */ - list_for_each_entry(pdd, &p->per_device_data, per_device_list) + * if we should delete debug managers and reset all wavefronts + */ + list_for_each_entry(pdd, &p->per_device_data, per_device_list) { + if ((pdd->dev->dbgmgr) && + (pdd->dev->dbgmgr->pasid == p->pasid)) + kfd_dbgmgr_destroy(pdd->dev->dbgmgr); + if (pdd->reset_wavefronts) { pr_warn("amdkfd: Resetting all wave fronts\n"); dbgdev_wave_reset_wavefronts(pdd->dev, p); 
pdd->reset_wavefronts = false; } + } mutex_unlock(&p->mutex); @@ -324,6 +329,7 @@ err_process_pqm_init: synchronize_rcu(); mmu_notifier_unregister_no_release(&process->mmu_notifier, process->mm); err_mmu_notifier: + mutex_destroy(&process->mutex); kfd_pasid_free(process->pasid); err_alloc_pasid: kfree(process->queues); @@ -404,42 +410,52 @@ void kfd_unbind_process_from_device(struct kfd_dev *dev, unsigned int pasid) idx = srcu_read_lock(&kfd_processes_srcu); + /* + * Look for the process that matches the pasid. If there is no such + * process, we either released it in amdkfd's own notifier, or there + * is a bug. Unfortunately, there is no way to tell... + */ hash_for_each_rcu(kfd_processes_table, i, p, kfd_processes) - if (p->pasid == pasid) - break; + if (p->pasid == pasid) { - srcu_read_unlock(&kfd_processes_srcu, idx); + srcu_read_unlock(&kfd_processes_srcu, idx); - BUG_ON(p->pasid != pasid); + pr_debug("Unbinding process %d from IOMMU\n", pasid); - mutex_lock(&p->mutex); + mutex_lock(&p->mutex); - if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) - kfd_dbgmgr_destroy(dev->dbgmgr); + if ((dev->dbgmgr) && (dev->dbgmgr->pasid == p->pasid)) + kfd_dbgmgr_destroy(dev->dbgmgr); - pqm_uninit(&p->pqm); + pqm_uninit(&p->pqm); - pdd = kfd_get_process_device_data(dev, p); + pdd = kfd_get_process_device_data(dev, p); - if (!pdd) { - mutex_unlock(&p->mutex); - return; - } + if (!pdd) { + mutex_unlock(&p->mutex); + return; + } - if (pdd->reset_wavefronts) { - dbgdev_wave_reset_wavefronts(pdd->dev, p); - pdd->reset_wavefronts = false; - } + if (pdd->reset_wavefronts) { + dbgdev_wave_reset_wavefronts(pdd->dev, p); + pdd->reset_wavefronts = false; + } - /* - * Just mark pdd as unbound, because we still need it to call - * amd_iommu_unbind_pasid() in when the process exits. - * We don't call amd_iommu_unbind_pasid() here - * because the IOMMU called us. - */ - pdd->bound = false; + /* + * Just mark pdd as unbound, because we still need it + * to call amd_iommu_unbind_pasid() when the + * process exits. + * We don't call amd_iommu_unbind_pasid() here + * because the IOMMU called us. 
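/*
 * Illustrative sketch (not part of the patch): the reworked
 * kfd_unbind_process_from_device() handles a match entirely inside
 * the SRCU read section and returns from there, so a missing entry is
 * no longer dereferenced after the loop (the old BUG_ON ran on
 * whatever 'p' happened to hold).  Only the control flow is modeled
 * here, with a plain reader lock; SRCU's grace-period lifetime
 * guarantees are not reproduced.
 */
struct process { int pasid; struct process *next; };

extern void read_lock(void);
extern void read_unlock(void);
extern void teardown(struct process *p);

static struct process *table;

static void unbind_by_pasid(int pasid)
{
	struct process *p;

	read_lock();
	for (p = table; p; p = p->next) {
		if (p->pasid == pasid) {
			read_unlock();
			teardown(p);	/* per-process cleanup */
			return;
		}
	}
	read_unlock();	/* no match: already released, or a bug */
}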
+ */ + pdd->bound = false; - mutex_unlock(&p->mutex); + mutex_unlock(&p->mutex); + + return; + } + + srcu_read_unlock(&kfd_processes_srcu, idx); } struct kfd_process_device *kfd_get_first_process_device_data(struct kfd_process *p) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 74909e72a009..884c96f50c3d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -666,7 +666,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.simd_count); if (dev->mem_bank_count < dev->node_props.mem_banks_count) { - pr_warn("kfd: mem_banks_count truncated from %d to %d\n", + pr_info_once("kfd: mem_banks_count truncated from %d to %d\n", dev->node_props.mem_banks_count, dev->mem_bank_count); sysfs_show_32bit_prop(buffer, "mem_banks_count", diff --git a/drivers/gpu/drm/amd/include/amd_pcie.h b/drivers/gpu/drm/amd/include/amd_pcie.h index 7c2a916c1e63..5eb895fd98bf 100644 --- a/drivers/gpu/drm/amd/include/amd_pcie.h +++ b/drivers/gpu/drm/amd/include/amd_pcie.h @@ -37,6 +37,13 @@ #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_MASK 0x0000FFFF #define CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_SHIFT 0 +/* gen: chipset 1/2, asic 1/2/3 */ +#define AMDGPU_DEFAULT_PCIE_GEN_MASK (CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1 \ + | CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2 \ + | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 \ + | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 \ + | CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3) + /* Following flags shows PCIe lane width switch supported in driver which are decided by chipset and ASIC */ #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 0x00010000 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 0x00020000 @@ -47,4 +54,11 @@ #define CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 0x00400000 #define CAIL_PCIE_LINK_WIDTH_SUPPORT_SHIFT 16 +/* 1/2/4/8/16 lanes */ +#define AMDGPU_DEFAULT_PCIE_MLW_MASK (CAIL_PCIE_LINK_WIDTH_SUPPORT_X1 \ + | CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 \ + | CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 \ + | CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 \ + | CAIL_PCIE_LINK_WIDTH_SUPPORT_X16) + #endif diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index 6080951d539d..a74a0d2ff1ca 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -26,15 +26,6 @@ #define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */ /* -* Supported GPU families (aligned with amdgpu_drm.h) -*/ -#define AMD_FAMILY_UNKNOWN 0 -#define AMD_FAMILY_CI 120 /* Bonaire, Hawaii */ -#define AMD_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */ -#define AMD_FAMILY_VI 130 /* Iceland, Tonga */ -#define AMD_FAMILY_CZ 135 /* Carrizo */ - -/* * Supported ASIC types */ enum amd_asic_type { @@ -120,6 +111,8 @@ enum amd_powergating_state { #define AMD_PG_SUPPORT_SDMA (1 << 8) #define AMD_PG_SUPPORT_ACP (1 << 9) #define AMD_PG_SUPPORT_SAMU (1 << 10) +#define AMD_PG_SUPPORT_GFX_QUICK_MG (1 << 11) +#define AMD_PG_SUPPORT_GFX_PIPELINE (1 << 12) enum amd_pm_state_type { /* not used for dpm */ @@ -157,6 +150,7 @@ struct amd_ip_funcs { int (*hw_init)(void *handle); /* tears down the hw state */ int (*hw_fini)(void *handle); + void (*late_fini)(void *handle); /* handles IP specific hw/sw changes for suspend */ int (*suspend)(void *handle); /* handles IP specific hw/sw changes for resume */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h index ebaf67bb1589..90ff7c8a6011 100644 --- 
a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_d.h @@ -2823,4 +2823,7 @@ #define mmDC_EDC_CSINVOC_CNT 0x3192 #define mmDC_EDC_RESTORE_CNT 0x3193 +#define mmGC_CAC_IND_INDEX 0x129a +#define mmGC_CAC_IND_DATA 0x129b + #endif /* GFX_8_0_D_H */ diff --git a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h index 7d722458d9f5..4070ca3a68eb 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gca/gfx_8_0_sh_mask.h @@ -8730,8 +8730,6 @@ #define RLC_GPM_STAT__DYN_CU_POWERING_DOWN__SHIFT 0x10 #define RLC_GPM_STAT__ABORTED_PD_SEQUENCE_MASK 0x20000 #define RLC_GPM_STAT__ABORTED_PD_SEQUENCE__SHIFT 0x11 -#define RLC_GPM_STAT__RESERVED_MASK 0xfc0000 -#define RLC_GPM_STAT__RESERVED__SHIFT 0x12 #define RLC_GPM_STAT__PG_ERROR_STATUS_MASK 0xff000000 #define RLC_GPM_STAT__PG_ERROR_STATUS__SHIFT 0x18 #define RLC_GPU_CLOCK_32_RES_SEL__RES_SEL_MASK 0x3f @@ -8764,8 +8762,10 @@ #define RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE__SHIFT 0x12 #define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE_MASK 0x80000 #define RLC_PG_CNTL__SMU_HANDSHAKE_ENABLE__SHIFT 0x13 -#define RLC_PG_CNTL__RESERVED1_MASK 0xf00000 -#define RLC_PG_CNTL__RESERVED1__SHIFT 0x14 +#define RLC_PG_CNTL__QUICK_PG_ENABLE_MASK 0x100000 +#define RLC_PG_CNTL__QUICK_PG_ENABLE__SHIFT 0x14 +#define RLC_PG_CNTL__RESERVED1_MASK 0xe00000 +#define RLC_PG_CNTL__RESERVED1__SHIFT 0x15 #define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY_MASK 0xff #define RLC_GPM_THREAD_PRIORITY__THREAD0_PRIORITY__SHIFT 0x0 #define RLC_GPM_THREAD_PRIORITY__THREAD1_PRIORITY_MASK 0xff00 @@ -9102,8 +9102,6 @@ #define RLC_GPM_LOG_CONT__CONT__SHIFT 0x0 #define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG_MASK 0xff #define RLC_PG_DELAY_3__CGCG_ACTIVE_BEFORE_CGPG__SHIFT 0x0 -#define RLC_PG_DELAY_3__RESERVED_MASK 0xffffff00 -#define RLC_PG_DELAY_3__RESERVED__SHIFT 0x8 #define RLC_GPM_INT_DISABLE_TH0__DISABLE_MASK 0xffffffff #define RLC_GPM_INT_DISABLE_TH0__DISABLE__SHIFT 0x0 #define RLC_GPM_INT_DISABLE_TH1__DISABLE_MASK 0xffffffff @@ -9124,14 +9122,8 @@ #define RLC_SRM_DEBUG_SELECT__RESERVED__SHIFT 0x8 #define RLC_SRM_DEBUG__DATA_MASK 0xffffffff #define RLC_SRM_DEBUG__DATA__SHIFT 0x0 -#define RLC_SRM_ARAM_ADDR__ADDR_MASK 0x3ff -#define RLC_SRM_ARAM_ADDR__ADDR__SHIFT 0x0 -#define RLC_SRM_ARAM_ADDR__RESERVED_MASK 0xfffffc00 -#define RLC_SRM_ARAM_ADDR__RESERVED__SHIFT 0xa #define RLC_SRM_ARAM_DATA__DATA_MASK 0xffffffff #define RLC_SRM_ARAM_DATA__DATA__SHIFT 0x0 -#define RLC_SRM_DRAM_ADDR__ADDR_MASK 0x3ff -#define RLC_SRM_DRAM_ADDR__ADDR__SHIFT 0x0 #define RLC_SRM_DRAM_ADDR__RESERVED_MASK 0xfffffc00 #define RLC_SRM_DRAM_ADDR__RESERVED__SHIFT 0xa #define RLC_SRM_DRAM_DATA__DATA_MASK 0xffffffff @@ -17946,8 +17938,6 @@ #define VGT_TESS_DISTRIBUTION__ACCUM_TRI__SHIFT 0x8 #define VGT_TESS_DISTRIBUTION__ACCUM_QUAD_MASK 0xff0000 #define VGT_TESS_DISTRIBUTION__ACCUM_QUAD__SHIFT 0x10 -#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT_MASK 0xff000000 -#define VGT_TESS_DISTRIBUTION__DONUT_SPLIT__SHIFT 0x18 #define VGT_TF_RING_SIZE__SIZE_MASK 0xffff #define VGT_TF_RING_SIZE__SIZE__SHIFT 0x0 #define VGT_SYS_CONFIG__DUAL_CORE_EN_MASK 0x1 @@ -20502,8 +20492,6 @@ #define DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6 #define 
DIDT_SQ_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_SQ_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_SQ_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20558,8 +20546,6 @@ #define DIDT_DB_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_DB_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_DB_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_DB_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_DB_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_DB_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_DB_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20614,8 +20600,6 @@ #define DIDT_TD_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_TD_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_TD_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_TD_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_TD_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20670,8 +20654,6 @@ #define DIDT_TCP_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_TCP_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_TCP_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_TCP_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_TCP_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20726,8 +20708,6 @@ #define DIDT_DBR_CTRL0__DIDT_CTRL_RST__SHIFT 0x4 #define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK 0x20 #define DIDT_DBR_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT 0x5 -#define DIDT_DBR_CTRL0__UNUSED_0_MASK 0xffffffc0 -#define DIDT_DBR_CTRL0__UNUSED_0__SHIFT 0x6 #define DIDT_DBR_CTRL1__MIN_POWER_MASK 0xffff #define DIDT_DBR_CTRL1__MIN_POWER__SHIFT 0x0 #define DIDT_DBR_CTRL1__MAX_POWER_MASK 0xffff0000 @@ -20773,4 +20753,84 @@ #define DIDT_DBR_WEIGHT8_11__WEIGHT11_MASK 0xff000000 #define DIDT_DBR_WEIGHT8_11__WEIGHT11__SHIFT 0x18 +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001 +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000 + +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007e +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001 +#define DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007 + +#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L +#define DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d + +#define DIDT_SQ_STALL_CTRL__UNUSED_0_MASK 0xe0000000L +#define DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d + +#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L +#define DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000 + +#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL +#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001 +#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L +#define DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f + +#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L +#define DIDT_TD_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000 + +#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL +#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L +#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001 +#define DIDT_TD_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007 + +#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L +#define DIDT_TD_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d + +#define 
DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L +#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L +#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006 +#define DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c + +#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L +#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL +#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L + +#define DIDT_TD_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000 +#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001 +#define DIDT_TD_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f + +#define DIDT_TD_STALL_CTRL__UNUSED_0_MASK 0xe0000000L +#define DIDT_TD_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d + +#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L +#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L +#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006 +#define DIDT_TD_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c + +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK 0x00000001L +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT 0x00000000 + +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK 0x0000007eL +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK 0x00001f80L +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT 0x00000001 +#define DIDT_TCP_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT 0x00000007 + +#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK 0x1fffe000L +#define DIDT_TCP_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT 0x0000000d + +#define DIDT_TCP_STALL_CTRL__UNUSED_0_MASK 0xe0000000L +#define DIDT_TCP_STALL_CTRL__UNUSED_0__SHIFT 0x0000001d + +#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK 0x00000001L +#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK 0x00007ffeL +#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK 0x1fff8000L +#define DIDT_TCP_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT 0x00000000 +#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT 0x00000001 +#define DIDT_TCP_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT 0x0000000f + +#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK 0x00000fc0L +#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK 0x0003f000L +#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT 0x00000006 +#define DIDT_TCP_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT 0x0000000c + #endif /* GFX_8_0_SH_MASK_H */ diff --git a/drivers/gpu/drm/amd/include/atombios.h b/drivers/gpu/drm/amd/include/atombios.h index 32f3e345de08..3493da5c8f0e 100644 --- a/drivers/gpu/drm/amd/include/atombios.h +++ b/drivers/gpu/drm/amd/include/atombios.h @@ -5538,6 +5538,78 @@ typedef struct _ATOM_ASIC_PROFILING_INFO_V3_5 ULONG ulReserved[12]; }ATOM_ASIC_PROFILING_INFO_V3_5; +/* for Polaris10/11 AVFS parameters */ +typedef struct _ATOM_ASIC_PROFILING_INFO_V3_6 +{ + ATOM_COMMON_TABLE_HEADER asHeader; + ULONG ulMaxVddc; + ULONG ulMinVddc; + USHORT usLkgEuseIndex; + UCHAR ucLkgEfuseBitLSB; + UCHAR ucLkgEfuseLength; + ULONG ulLkgEncodeLn_MaxDivMin; + ULONG ulLkgEncodeMax; + ULONG ulLkgEncodeMin; + EFUSE_LINEAR_FUNC_PARAM sRoFuse; + ULONG ulEvvDefaultVddc; + ULONG ulEvvNoCalcVddc; + ULONG ulSpeed_Model; + ULONG ulSM_A0; + ULONG ulSM_A1; + ULONG ulSM_A2; + ULONG ulSM_A3; + ULONG ulSM_A4; + ULONG ulSM_A5; + ULONG ulSM_A6; + ULONG ulSM_A7; + UCHAR ucSM_A0_sign; + UCHAR ucSM_A1_sign; + UCHAR ucSM_A2_sign; + UCHAR ucSM_A3_sign; + UCHAR ucSM_A4_sign; + UCHAR ucSM_A5_sign; + UCHAR ucSM_A6_sign; + UCHAR ucSM_A7_sign; + ULONG 
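/*
 * Illustrative sketch (not part of the patch): several of the new
 * DIDT fields above are split into HI/LO halves -- e.g.
 * DIDT_*_CTRL0__DIDT_MAX_STALLS_ALLOWED uses two 6-bit fields (mask
 * 0x00000fc0 at shift 6 and 0x0003f000 at shift 12) to carry one
 * 12-bit value.  Which six bits are the arithmetically high ones is
 * an assumption here; the packing pattern itself is the point.
 */
#include <stdint.h>

#define MAX_STALLS_HI_MASK	0x00000fc0u
#define MAX_STALLS_HI_SHIFT	6
#define MAX_STALLS_LO_MASK	0x0003f000u
#define MAX_STALLS_LO_SHIFT	12

static uint32_t pack_max_stalls(uint32_t reg, uint32_t v)
{
	reg &= ~(MAX_STALLS_HI_MASK | MAX_STALLS_LO_MASK);
	reg |= ((v >> 6) << MAX_STALLS_HI_SHIFT) & MAX_STALLS_HI_MASK;	/* v[11:6] */
	reg |= ((v & 0x3f) << MAX_STALLS_LO_SHIFT) & MAX_STALLS_LO_MASK;	/* v[5:0] */
	return reg;
}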
ulMargin_RO_a; + ULONG ulMargin_RO_b; + ULONG ulMargin_RO_c; + ULONG ulMargin_fixed; + ULONG ulMargin_Fmax_mean; + ULONG ulMargin_plat_mean; + ULONG ulMargin_Fmax_sigma; + ULONG ulMargin_plat_sigma; + ULONG ulMargin_DC_sigma; + ULONG ulLoadLineSlop; + ULONG ulaTDClimitPerDPM[8]; + ULONG ulaNoCalcVddcPerDPM[8]; + ULONG ulAVFS_meanNsigma_Acontant0; + ULONG ulAVFS_meanNsigma_Acontant1; + ULONG ulAVFS_meanNsigma_Acontant2; + USHORT usAVFS_meanNsigma_DC_tol_sigma; + USHORT usAVFS_meanNsigma_Platform_mean; + USHORT usAVFS_meanNsigma_Platform_sigma; + ULONG ulGB_VDROOP_TABLE_CKSOFF_a0; + ULONG ulGB_VDROOP_TABLE_CKSOFF_a1; + ULONG ulGB_VDROOP_TABLE_CKSOFF_a2; + ULONG ulGB_VDROOP_TABLE_CKSON_a0; + ULONG ulGB_VDROOP_TABLE_CKSON_a1; + ULONG ulGB_VDROOP_TABLE_CKSON_a2; + ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_m1; + USHORT usAVFSGB_FUSE_TABLE_CKSOFF_m2; + ULONG ulAVFSGB_FUSE_TABLE_CKSOFF_b; + ULONG ulAVFSGB_FUSE_TABLE_CKSON_m1; + USHORT usAVFSGB_FUSE_TABLE_CKSON_m2; + ULONG ulAVFSGB_FUSE_TABLE_CKSON_b; + USHORT usMaxVoltage_0_25mv; + UCHAR ucEnableGB_VDROOP_TABLE_CKSOFF; + UCHAR ucEnableGB_VDROOP_TABLE_CKSON; + UCHAR ucEnableGB_FUSE_TABLE_CKSOFF; + UCHAR ucEnableGB_FUSE_TABLE_CKSON; + USHORT usPSM_Age_ComFactor; + UCHAR ucEnableApplyAVFS_CKS_OFF_Voltage; + UCHAR ucReserved; +}ATOM_ASIC_PROFILING_INFO_V3_6; + typedef struct _ATOM_SCLK_FCW_RANGE_ENTRY_V1{ ULONG ulMaxSclkFreq; diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h index a461e155a160..0c8c85d2a2a5 100644 --- a/drivers/gpu/drm/amd/include/cgs_common.h +++ b/drivers/gpu/drm/amd/include/cgs_common.h @@ -49,6 +49,7 @@ enum cgs_ind_reg { CGS_IND_REG__SMC, CGS_IND_REG__UVD_CTX, CGS_IND_REG__DIDT, + CGS_IND_REG_GC_CAC, CGS_IND_REG__AUDIO_ENDPT }; @@ -115,6 +116,7 @@ enum cgs_system_info_id { CGS_SYSTEM_INFO_CG_FLAGS, CGS_SYSTEM_INFO_PG_FLAGS, CGS_SYSTEM_INFO_GFX_CU_INFO, + CGS_SYSTEM_INFO_GFX_SE_INFO, CGS_SYSTEM_INFO_ID_MAXIMUM, }; @@ -189,7 +191,6 @@ typedef unsigned long cgs_handle_t; struct cgs_acpi_method_argument { uint32_t type; - uint32_t method_length; uint32_t data_length; union{ uint32_t value; @@ -581,6 +582,9 @@ typedef int (*cgs_get_firmware_info)(struct cgs_device *cgs_device, enum cgs_ucode_id type, struct cgs_firmware_info *info); +typedef int (*cgs_rel_firmware)(struct cgs_device *cgs_device, + enum cgs_ucode_id type); + typedef int(*cgs_set_powergating_state)(struct cgs_device *cgs_device, enum amd_ip_block_type block_type, enum amd_powergating_state state); @@ -645,6 +649,7 @@ struct cgs_ops { cgs_set_camera_voltages_t set_camera_voltages; /* Firmware Info */ cgs_get_firmware_info get_firmware_info; + cgs_rel_firmware rel_firmware; /* cg pg interface*/ cgs_set_powergating_state set_powergating_state; cgs_set_clockgating_state set_clockgating_state; @@ -738,6 +743,8 @@ struct cgs_device CGS_CALL(set_camera_voltages,dev,mask,voltages) #define cgs_get_firmware_info(dev, type, info) \ CGS_CALL(get_firmware_info, dev, type, info) +#define cgs_rel_firmware(dev, type) \ + CGS_CALL(rel_firmware, dev, type) #define cgs_set_powergating_state(dev, block_type, state) \ CGS_CALL(set_powergating_state, dev, block_type, state) #define cgs_set_clockgating_state(dev, block_type, state) \ diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c index 8e345bfddb69..f9e03ad0baa2 100644 --- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c @@ -73,11 +73,14 @@ static int pp_sw_init(void *handle) ret = 
hwmgr->hwmgr_func->backend_init(hwmgr); if (ret) - goto err; + goto err1; pr_info("amdgpu: powerplay initialized\n"); return 0; +err1: + if (hwmgr->pptable_func->pptable_fini) + hwmgr->pptable_func->pptable_fini(hwmgr); err: pr_err("amdgpu: powerplay initialization failed\n"); return ret; @@ -100,6 +103,9 @@ static int pp_sw_fini(void *handle) if (hwmgr->hwmgr_func->backend_fini != NULL) ret = hwmgr->hwmgr_func->backend_fini(hwmgr); + if (hwmgr->pptable_func->pptable_fini) + hwmgr->pptable_func->pptable_fini(hwmgr); + return ret; } @@ -530,6 +536,10 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input, case AMD_PP_EVENT_COMPLETE_INIT: ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); break; + case AMD_PP_EVENT_READJUST_POWER_STATE: + pp_handle->hwmgr->current_ps = pp_handle->hwmgr->boot_ps; + ret = pem_handle_event(pp_handle->eventmgr, event_id, &data); + break; default: break; } @@ -734,12 +744,12 @@ static int pp_dpm_get_pp_table(void *handle, char **table) PP_CHECK_HW(hwmgr); - if (hwmgr->hwmgr_func->get_pp_table == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); - return 0; - } + if (!hwmgr->soft_pp_table) + return -EINVAL; + + *table = (char *)hwmgr->soft_pp_table; - return hwmgr->hwmgr_func->get_pp_table(hwmgr, table); + return hwmgr->soft_pp_table_size; } static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) @@ -753,12 +763,23 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) PP_CHECK_HW(hwmgr); - if (hwmgr->hwmgr_func->set_pp_table == NULL) { - printk(KERN_INFO "%s was not implemented.\n", __func__); - return 0; + if (!hwmgr->hardcode_pp_table) { + hwmgr->hardcode_pp_table = + kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); + + if (!hwmgr->hardcode_pp_table) + return -ENOMEM; + + /* to avoid powerplay crash when hardcode pptable is empty */ + memcpy(hwmgr->hardcode_pp_table, hwmgr->soft_pp_table, + hwmgr->soft_pp_table_size); } - return hwmgr->hwmgr_func->set_pp_table(hwmgr, buf, size); + memcpy(hwmgr->hardcode_pp_table, buf, size); + + hwmgr->soft_pp_table = hwmgr->hardcode_pp_table; + + return amd_powerplay_reset(handle); } static int pp_dpm_force_clock_level(void *handle, @@ -800,6 +821,82 @@ static int pp_dpm_print_clock_levels(void *handle, return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); } +static int pp_dpm_get_sclk_od(void *handle) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->get_sclk_od == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->get_sclk_od(hwmgr); +} + +static int pp_dpm_set_sclk_od(void *handle, uint32_t value) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->set_sclk_od == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value); +} + +static int pp_dpm_get_mclk_od(void *handle) +{ + struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->get_mclk_od == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->get_mclk_od(hwmgr); +} + +static int pp_dpm_set_mclk_od(void *handle, uint32_t value) +{ + 
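/*
 * Illustrative sketch (not part of the patch): the rewritten
 * pp_dpm_set_pp_table() is a copy-on-first-write override -- the
 * hardcode buffer is allocated once, pre-seeded from the active soft
 * table so a short user write leaves the tail intact, then installed
 * as the active table before the powerplay reset.  Types and the
 * length check are simplified; the driver returns -ENOMEM/-EINVAL
 * where this returns -1.
 */
#include <stdlib.h>
#include <string.h>

struct mgr {
	const void *active;	/* what the stack reads (soft_pp_table) */
	void *override;		/* user-provided copy (hardcode_pp_table) */
	size_t size;
};

static int set_override(struct mgr *m, const void *buf, size_t len)
{
	if (len > m->size)
		return -1;
	if (!m->override) {
		m->override = malloc(m->size);
		if (!m->override)
			return -1;
		memcpy(m->override, m->active, m->size);	/* seed full table */
	}
	memcpy(m->override, buf, len);
	m->active = m->override;
	return 0;	/* caller then re-initializes powerplay */
}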
struct pp_hwmgr *hwmgr; + + if (!handle) + return -EINVAL; + + hwmgr = ((struct pp_instance *)handle)->hwmgr; + + PP_CHECK_HW(hwmgr); + + if (hwmgr->hwmgr_func->set_mclk_od == NULL) { + printk(KERN_INFO "%s was not implemented.\n", __func__); + return 0; + } + + return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); +} + const struct amd_powerplay_funcs pp_dpm_funcs = { .get_temperature = pp_dpm_get_temperature, .load_firmware = pp_dpm_load_fw, @@ -822,6 +919,10 @@ const struct amd_powerplay_funcs pp_dpm_funcs = { .set_pp_table = pp_dpm_set_pp_table, .force_clock_level = pp_dpm_force_clock_level, .print_clock_levels = pp_dpm_print_clock_levels, + .get_sclk_od = pp_dpm_get_sclk_od, + .set_sclk_od = pp_dpm_set_sclk_od, + .get_mclk_od = pp_dpm_get_mclk_od, + .set_mclk_od = pp_dpm_set_mclk_od, }; static int amd_pp_instance_init(struct amd_pp_init *pp_init, @@ -903,6 +1004,44 @@ int amd_powerplay_fini(void *handle) return 0; } +int amd_powerplay_reset(void *handle) +{ + struct pp_instance *instance = (struct pp_instance *)handle; + struct pp_eventmgr *eventmgr; + struct pem_event_data event_data = { {0} }; + int ret; + + if (instance == NULL) + return -EINVAL; + + eventmgr = instance->eventmgr; + if (!eventmgr || !eventmgr->pp_eventmgr_fini) + return -EINVAL; + + eventmgr->pp_eventmgr_fini(eventmgr); + + ret = pp_sw_fini(handle); + if (ret) + return ret; + + kfree(instance->hwmgr->ps); + + ret = pp_sw_init(handle); + if (ret) + return ret; + + hw_init_power_state_table(instance->hwmgr); + + if (eventmgr == NULL || eventmgr->pp_eventmgr_init == NULL) + return -EINVAL; + + ret = eventmgr->pp_eventmgr_init(eventmgr); + if (ret) + return ret; + + return pem_handle_event(eventmgr, AMD_PP_EVENT_COMPLETE_INIT, &event_data); +} + /* export this function to DAL */ int amd_powerplay_display_configuration_change(void *handle, diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c index 46410e3c7349..fb88e4e5d625 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c @@ -58,9 +58,6 @@ static void pem_fini(struct pp_eventmgr *eventmgr) pem_unregister_interrupts(eventmgr); pem_handle_event(eventmgr, AMD_PP_EVENT_UNINITIALIZE, &event_data); - - if (eventmgr != NULL) - kfree(eventmgr); } int eventmgr_init(struct pp_instance *handle) diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c index 5cd123472db4..b6f45fd01fa6 100644 --- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c +++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventtasks.c @@ -132,8 +132,7 @@ int pem_task_enable_dynamic_state_management(struct pp_eventmgr *eventmgr, struc int pem_task_disable_dynamic_state_management(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) { - /* TODO */ - return 0; + return phm_disable_dynamic_state_management(eventmgr->hwmgr); } int pem_task_enable_clock_power_gatings_tasks(struct pp_eventmgr *eventmgr, struct pem_event_data *event_data) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c index 436fc16dabb6..2da548f6337e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c @@ -206,7 +206,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) AMD_IP_BLOCK_TYPE_VCE, AMD_PG_STATE_GATE); cz_enable_disable_vce_dpm(hwmgr, false); - /* TODO: to figure out 
why vce can't be poweroff*/ + cz_dpm_powerdown_vce(hwmgr); cz_hwmgr->vce_power_gated = true; } else { cz_dpm_powerup_vce(hwmgr); @@ -225,6 +225,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) } } } else { + cz_hwmgr->vce_power_gated = bgate; cz_dpm_update_vce_dpm(hwmgr); cz_enable_disable_vce_dpm(hwmgr, !bgate); return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c index 1f14c477d15d..9bf622e123b6 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c @@ -1180,6 +1180,13 @@ static int cz_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, static int cz_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { int result = 0; + struct cz_hwmgr *data; + + data = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; result = cz_initialize_dpm_defaults(hwmgr); if (result != 0) { @@ -1909,15 +1916,7 @@ static const struct pp_hwmgr_func cz_hwmgr_funcs = { int cz_hwmgr_init(struct pp_hwmgr *hwmgr) { - struct cz_hwmgr *cz_hwmgr; - int ret = 0; - - cz_hwmgr = kzalloc(sizeof(struct cz_hwmgr), GFP_KERNEL); - if (cz_hwmgr == NULL) - return -ENOMEM; - - hwmgr->backend = cz_hwmgr; hwmgr->hwmgr_func = &cz_hwmgr_funcs; hwmgr->pptable_func = &pptable_funcs; - return ret; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c index 24a16e49b571..744aa886a2be 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.c @@ -581,25 +581,24 @@ static int fiji_patch_boot_state(struct pp_hwmgr *hwmgr, static int fiji_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (data->soft_pp_table) { - kfree(data->soft_pp_table); - data->soft_pp_table = NULL; - } - return phm_hwmgr_backend_fini(hwmgr); } static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_hwmgr *data; uint32_t i; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); bool stay_in_boot; int result; + data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + data->dll_default_on = false; data->sram_end = SMC_RAM_END; @@ -633,6 +632,8 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) data->vddci_control = FIJI_VOLTAGE_CONTROL_NONE; data->mvdd_control = FIJI_VOLTAGE_CONTROL_NONE; + data->force_pcie_gen = PP_PCIEGenInvalid; + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) data->voltage_control = FIJI_VOLTAGE_CONTROL_BY_SVID2; @@ -732,7 +733,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_gen_cap = 0x30007; + data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; else data->pcie_gen_cap = (uint32_t)sys_info.value; if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) @@ -741,7 +742,7 @@ static int fiji_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_lane_cap = 0x2f0000; + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; else data->pcie_lane_cap = (uint32_t)sys_info.value; } 
else { @@ -1234,6 +1235,34 @@ static int fiji_program_voting_clients(struct pp_hwmgr *hwmgr) return 0; } +static int fiji_clear_voting_clients(struct pp_hwmgr *hwmgr) +{ + /* Reset voting clients before disabling DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, 0); + + return 0; +} + /** * Get the location of various tables inside the FW image. * @@ -1361,6 +1390,17 @@ static int fiji_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, } /** +* Call SMC to reset S0/S1 to S1 and Reset SMIO to initial value +* +* @param hwmgr the address of the powerplay hardware manager. +* @return if success then 0; +*/ +static int fiji_reset_to_default(struct pp_hwmgr *hwmgr) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); +} + +/** * Initial switch from ARB F0->F1 * * @param hwmgr the address of the powerplay hardware manager. @@ -1373,6 +1413,21 @@ static int fiji_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); } +static int fiji_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) +{ + uint32_t tmp; + + tmp = (cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixSMC_SCRATCH9) & + 0x0000ff00) >> 8; + + if (tmp == MC_CG_ARB_FREQ_F0) + return 0; + + return fiji_copy_and_switch_arb_sets(hwmgr, + tmp, MC_CG_ARB_FREQ_F0); +} + static int fiji_reset_single_dpm_table(struct pp_hwmgr *hwmgr, struct fiji_single_dpm_table *dpm_table, uint32_t count) { @@ -1830,7 +1885,7 @@ static uint16_t fiji_find_closest_vddci(struct pp_hwmgr *hwmgr, uint16_t vddci) PP_ASSERT_WITH_CODE(false, "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", - return vddci_table->entries[i].value); + return vddci_table->entries[i-1].value); } static int fiji_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, @@ -3175,6 +3230,17 @@ static int fiji_enable_ulv(struct pp_hwmgr *hwmgr) return 0; } +static int fiji_disable_ulv(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_ulv_parm *ulv = &(data->ulv); + + if (ulv->ulv_supported) + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); + + return 0; +} + static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -3195,6 +3261,21 @@ static int fiji_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) return 0; } +static int fiji_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF)) { + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master 
Deep Sleep switch failed!", + return -1); + } + } + + return 0; +} + static int fiji_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) { struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); @@ -3355,6 +3436,70 @@ static int fiji_start_dpm(struct pp_hwmgr *hwmgr) return 0; } +static int fiji_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + /* disable SCLK dpm */ + if (!data->sclk_dpm_key_disabled) + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_DPM_Disable) == 0), + "Failed to disable SCLK DPM!", + return -1); + + /* disable MCLK dpm */ + if (!data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_SetEnabledMask, 1) == 0), + "Failed to force MCLK DPM0!", + return -1); + + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Disable) == 0), + "Failed to disable MCLK DPM!", + return -1); + } + + return 0; +} + +static int fiji_stop_dpm(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + /* disable general power management */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + GLOBAL_PWRMGT_EN, 0); + /* disable sclk deep sleep */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + DYNAMIC_PM_EN, 0); + + /* disable PCIE dpm */ + if (!data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Disable) == 0), + "Failed to disable pcie DPM during DPM Stop Function!", + return -1); + } + + if (fiji_disable_sclk_mclk_dpm(hwmgr)) { + printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); + return -1; + } + + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_Voltage_Cntl_Disable) == 0), + "Failed to disable voltage DPM during DPM Stop Function!", + return -1); + + return 0; +} + static void fiji_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) { @@ -3413,6 +3558,23 @@ static int fiji_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) return fiji_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); } +static int fiji_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + + if (data->active_auto_throttle_sources & (1 << source)) { + data->active_auto_throttle_sources &= ~(1 << source); + fiji_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int fiji_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return fiji_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr) { int tmp_result, result = 0; @@ -3527,6 +3689,64 @@ static int fiji_enable_dpm_tasks(struct pp_hwmgr *hwmgr) return result; } +static int fiji_disable_dpm_tasks(struct pp_hwmgr *hwmgr) +{ + int tmp_result, result = 0; + + tmp_result = (fiji_is_dpm_running(hwmgr)) ? 
0 : -1; + PP_ASSERT_WITH_CODE(tmp_result == 0, + "DPM is not running right now, no need to disable DPM!", + return 0); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); + + tmp_result = fiji_disable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable power containment!", result = tmp_result); + + tmp_result = fiji_disable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable SMC CAC!", result = tmp_result); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); + + tmp_result = fiji_disable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable thermal auto throttle!", result = tmp_result); + + tmp_result = fiji_stop_dpm(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to stop DPM!", result = tmp_result); + + tmp_result = fiji_disable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable deep sleep master switch!", result = tmp_result); + + tmp_result = fiji_disable_ulv(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable ULV!", result = tmp_result); + + tmp_result = fiji_clear_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to clear voting clients!", result = tmp_result); + + tmp_result = fiji_reset_to_default(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to reset to default!", result = tmp_result); + + tmp_result = fiji_force_switch_to_arbf0(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to force to switch arbf0!", result = tmp_result); + + return result; +} + static int fiji_force_dpm_highest(struct pp_hwmgr *hwmgr) { struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); @@ -5069,42 +5289,6 @@ static int fiji_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } -static int fiji_get_pp_table(struct pp_hwmgr *hwmgr, char **table) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size, - GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - *table = (char *)&data->soft_pp_table; - - return hwmgr->soft_pp_table_size; -} - -static int fiji_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) -{ - struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - memcpy(data->soft_pp_table, buf, size); - - hwmgr->soft_pp_table = data->soft_pp_table; - - /* TODO: re-init powerplay to implement modified pptable */ - - return 0; -} - static int fiji_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { @@ -5274,12 +5458,96 @@ bool fiji_check_smc_update_required_for_display_configuration(struct pp_hwmgr *h return is_update_required; } +static int fiji_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct fiji_single_dpm_table *golden_sclk_table = + 
&(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int fiji_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct fiji_power_state *fiji_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + fiji_ps = cast_phw_fiji_power_state(&ps->hardware); + + fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int fiji_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct fiji_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int fiji_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + struct fiji_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct fiji_power_state *fiji_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + fiji_ps = cast_phw_fiji_power_state(&ps->hardware); + + fiji_ps->performance_levels[fiji_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} static const struct pp_hwmgr_func fiji_hwmgr_funcs = { .backend_init = &fiji_hwmgr_backend_init, .backend_fini = &fiji_hwmgr_backend_fini, .asic_setup = &fiji_setup_asic_task, .dynamic_state_management_enable = &fiji_enable_dpm_tasks, + .dynamic_state_management_disable = &fiji_disable_dpm_tasks, .force_dpm_level = &fiji_dpm_force_dpm_level, .get_num_of_pp_table_entries = &tonga_get_number_of_powerplay_table_entries, .get_power_state_size = &fiji_get_power_state_size, @@ -5312,24 +5580,18 @@ static const struct pp_hwmgr_func fiji_hwmgr_funcs = { .get_fan_control_mode = fiji_get_fan_control_mode, .check_states_equal = fiji_check_states_equal, .check_smc_update_required_for_display_configuration = fiji_check_smc_update_required_for_display_configuration, - .get_pp_table = fiji_get_pp_table, - .set_pp_table = fiji_set_pp_table, .force_clock_level = fiji_force_clock_level, .print_clock_levels = fiji_print_clock_levels, + .get_sclk_od = fiji_get_sclk_od, + .set_sclk_od = fiji_set_sclk_od, + .get_mclk_od = fiji_get_mclk_od, + .set_mclk_od = fiji_set_mclk_od, }; int fiji_hwmgr_init(struct pp_hwmgr *hwmgr) { - struct fiji_hwmgr *data; - int ret = 0; - - data = kzalloc(sizeof(struct fiji_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - hwmgr->backend = data; hwmgr->hwmgr_func = 
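/*
 * fiji_hwmgr_init no longer allocates the backend here; with this series
 * the per-ASIC private data is expected to be created in the backend_init
 * hook and freed generically by hwmgr_fini's kfree(hwmgr->backend). The
 * pattern, sketched with a hypothetical foo ASIC (the polaris10 hunk
 * further down does exactly this):
 *
 *	int foo_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
 *	{
 *		struct foo_hwmgr *data;
 *
 *		data = kzalloc(sizeof(*data), GFP_KERNEL);
 *		if (data == NULL)
 *			return -ENOMEM;
 *		hwmgr->backend = data;
 *		...
 *	}
 */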
&fiji_hwmgr_funcs; hwmgr->pptable_func = &tonga_pptable_funcs; pp_fiji_thermal_initialize(hwmgr); - return ret; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h index 170edf5a772d..bf67c2a92c68 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_hwmgr.h @@ -302,9 +302,6 @@ struct fiji_hwmgr { bool pg_acp_init; bool frtc_enabled; bool frtc_status_changed; - - /* soft pptable for re-uploading into smu */ - void *soft_pp_table; }; /* To convert to Q8.8 format for firmware */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c index db23a4068baf..44658451a8d2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.c @@ -73,17 +73,18 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr) if (!tmp) { phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); - - phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CAC); fiji_hwmgr->fast_watermark_threshold = 100; - tmp = 1; - fiji_hwmgr->enable_dte_feature = tmp ? false : true; - fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false; - fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false; + if (hwmgr->powercontainment_enabled) { + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + tmp = 1; + fiji_hwmgr->enable_dte_feature = tmp ? false : true; + fiji_hwmgr->enable_tdc_limit_feature = tmp ? true : false; + fiji_hwmgr->enable_pkg_pwr_tracking_feature = tmp ? true : false; + } } } @@ -459,6 +460,23 @@ int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr) return result; } +int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC) && data->cac_enabled) { + int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable CAC in SMC.", result = -1); + + data->cac_enabled = false; + } + return result; +} + int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) { struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); @@ -528,6 +546,48 @@ int fiji_enable_power_containment(struct pp_hwmgr *hwmgr) return result; } +int fiji_disable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct fiji_hwmgr *data = (struct fiji_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment) && + data->power_containment_features) { + int smc_result; + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_TDCLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable TDCLimit in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_DTE) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableDTE)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable DTE in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + 
(uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable PkgPwrTracking in SMC.", + result = smc_result); + } + data->power_containment_features = 0; + } + + return result; +} + int fiji_power_control_set_level(struct pp_hwmgr *hwmgr) { struct phm_ppt_v1_information *table_info = diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h index 55e58200f33a..fec772421733 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/fiji_powertune.h @@ -36,6 +36,19 @@ enum fiji_pt_config_reg_type { #define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 #define POWERCONTAINMENT_FEATURE_PkgPwrLimit 0x00000004 +#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xffffffc0 +#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x6 +#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xffffffc0 +#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x6 +#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xffffffc0 +#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x6 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d +#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 +#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d +#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xe0000000 +#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001d + struct fiji_pt_config_reg { uint32_t offset; uint32_t mask; @@ -58,7 +71,9 @@ void fiji_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); int fiji_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); int fiji_populate_pm_fuses(struct pp_hwmgr *hwmgr); int fiji_enable_smc_cac(struct pp_hwmgr *hwmgr); +int fiji_disable_smc_cac(struct pp_hwmgr *hwmgr); int fiji_enable_power_containment(struct pp_hwmgr *hwmgr); +int fiji_disable_power_containment(struct pp_hwmgr *hwmgr); int fiji_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); int fiji_power_control_set_level(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c index fa208ada6892..789f98ad2615 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c @@ -154,6 +154,30 @@ int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr) return ret; } +int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr) +{ + int ret = -1; + bool enabled; + + PHM_FUNC_CHECK(hwmgr); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_TablelessHardwareInterface)) { + if (hwmgr->hwmgr_func->dynamic_state_management_disable) + ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr); + } else { + ret = phm_dispatch_table(hwmgr, + &(hwmgr->disable_dynamic_state_management), + NULL, NULL); + } + + enabled = ret == 0 ? 
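/*
 * phm_disable_dynamic_state_management mirrors the enable path: prefer the
 * hwmgr callback on tableless interfaces, fall back to the event-table
 * dispatch, then tell the common layer what state DPM ended up in. Note
 * the inverted sense of the ternary this comment sits inside: a zero
 * return means the disable worked, so "enabled" becomes false; on failure
 * DPM is assumed to still be running. An equivalent, plainer spelling:
 *
 *	enabled = (ret != 0);
 *	cgs_notify_dpm_enabled(hwmgr->device, enabled);
 */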
false : true; + + cgs_notify_dpm_enabled(hwmgr->device, enabled); + + return ret; +} + int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level) { PHM_FUNC_CHECK(hwmgr); @@ -306,11 +330,15 @@ int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, { PHM_FUNC_CHECK(hwmgr); - if (hwmgr->hwmgr_func->store_cc6_data == NULL) + if (display_config == NULL) return -EINVAL; hwmgr->display_config = *display_config; - /* to do pass other display configuration in furture */ + + if (hwmgr->hwmgr_func->store_cc6_data == NULL) + return -EINVAL; + + /* TODO: pass other display configuration in the future */ if (hwmgr->hwmgr_func->store_cc6_data) hwmgr->hwmgr_func->store_cc6_data(hwmgr, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c index 1c48917da3cf..03b6128ebc20 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c @@ -24,6 +24,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <drm/amdgpu_drm.h> #include "cgs_common.h" #include "power_state.h" #include "hwmgr.h" @@ -58,12 +59,13 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle) hwmgr->hw_revision = pp_init->rev_id; hwmgr->usec_timeout = AMD_MAX_USEC_TIMEOUT; hwmgr->power_source = PP_PowerSource_AC; + hwmgr->powercontainment_enabled = pp_init->powercontainment_enabled; switch (hwmgr->chip_family) { - case AMD_FAMILY_CZ: + case AMDGPU_FAMILY_CZ: cz_hwmgr_init(hwmgr); break; - case AMD_FAMILY_VI: + case AMDGPU_FAMILY_VI: switch (hwmgr->chip_id) { case CHIP_TONGA: tonga_hwmgr_init(hwmgr); @@ -93,6 +95,15 @@ int hwmgr_fini(struct pp_hwmgr *hwmgr) if (hwmgr == NULL || hwmgr->ps == NULL) return -EINVAL; + /* do hwmgr finish*/ + kfree(hwmgr->hardcode_pp_table); + + kfree(hwmgr->backend); + + kfree(hwmgr->start_thermal_controller.function_list); + + kfree(hwmgr->set_temperature_range.function_list); + kfree(hwmgr->ps); kfree(hwmgr); return 0; @@ -462,7 +473,7 @@ uint16_t phm_find_closest_vddci(struct pp_atomctrl_voltage_table *vddci_table, u PP_ASSERT_WITH_CODE(false, "VDDCI is larger than max VDDCI in VDDCI Voltage Table!", - return vddci_table->entries[i].value); + return vddci_table->entries[i-1].value); } int phm_find_boot_level(void *table, diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h index 347fef127ce9..2930a3355948 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr_ppt.h @@ -39,6 +39,7 @@ struct phm_ppt_v1_clock_voltage_dependency_record { uint8_t phases; uint8_t cks_enable; uint8_t cks_voffset; + uint32_t sclk_offset; }; typedef struct phm_ppt_v1_clock_voltage_dependency_record phm_ppt_v1_clock_voltage_dependency_record; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c index 8f142a74ad08..aeec25c66aa8 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_clockpowergating.c @@ -106,11 +106,17 @@ int polaris10_phm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) data->uvd_power_gated = bgate; if (bgate) { + cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_CG_STATE_GATE); polaris10_update_uvd_dpm(hwmgr, true); polaris10_phm_powerdown_uvd(hwmgr); } else { polaris10_phm_powerup_uvd(hwmgr); polaris10_update_uvd_dpm(hwmgr, false); + 
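/*
 * Ordering matters in polaris10_phm_powergate_uvd: on the gate path the
 * clocks are gated before the block is powered down, while on the ungate
 * path the block is powered up and its DPM restored before gating is
 * lifted, so UVD is never clocked while off. Condensed view of the two
 * branches as this hunk leaves them:
 *
 *	if (bgate) {
 *		cgs_set_clockgating_state(dev, UVD, AMD_CG_STATE_GATE);
 *		polaris10_update_uvd_dpm(hwmgr, true);
 *		polaris10_phm_powerdown_uvd(hwmgr);
 *	} else {
 *		polaris10_phm_powerup_uvd(hwmgr);
 *		polaris10_update_uvd_dpm(hwmgr, false);
 *		cgs_set_clockgating_state(dev, UVD, AMD_PG_STATE_UNGATE);
 *	}
 */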
cgs_set_clockgating_state(hwmgr->device, + AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); } return 0; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c index aa6be033f21b..9d764c4d253e 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.c @@ -389,6 +389,34 @@ static int polaris10_program_voting_clients(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_clear_voting_clients(struct pp_hwmgr *hwmgr) +{ + /* Reset voting clients before disabling DPM */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_SCLK_CNT, 1); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + SCLK_PWRMGT_CNTL, RESET_BUSY_CNT, 1); + + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_0, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_1, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_2, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_3, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_4, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_5, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_6, 0); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + ixCG_FREQ_TRAN_VOTING_7, 0); + + return 0; +} + /** * Get the location of various tables inside the FW image. * @@ -515,6 +543,11 @@ static int polaris10_copy_and_switch_arb_sets(struct pp_hwmgr *hwmgr, return 0; } +static int polaris10_reset_to_default(struct pp_hwmgr *hwmgr) +{ + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ResetToDefaults); +} + /** * Initial switch from ARB F0->F1 * @@ -528,6 +561,21 @@ static int polaris10_initial_switch_from_arbf0_to_f1(struct pp_hwmgr *hwmgr) MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); } +static int polaris10_force_switch_to_arbf0(struct pp_hwmgr *hwmgr) +{ + uint32_t tmp; + + tmp = (cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, ixSMC_SCRATCH9) & + 0x0000ff00) >> 8; + + if (tmp == MC_CG_ARB_FREQ_F0) + return 0; + + return polaris10_copy_and_switch_arb_sets(hwmgr, + tmp, MC_CG_ARB_FREQ_F0); +} + static int polaris10_setup_default_pcie_table(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -999,7 +1047,7 @@ static int polaris10_get_dependency_volt_by_clk(struct pp_hwmgr *hwmgr, vddci = phm_find_closest_vddci(&(data->vddci_voltage_table), (dep_table->entries[i].vddc - (uint16_t)data->vddc_vddci_delta)); - *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; + *voltage |= (vddci * VOLTAGE_SCALE) << VDDCI_SHIFT; } if (POLARIS10_VOLTAGE_CONTROL_NONE == data->mvdd_control) @@ -1296,7 +1344,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, } mem_level->MclkFrequency = clock; - mem_level->StutterEnable = 0; mem_level->EnabledForThrottle = 1; mem_level->EnabledForActivity = 0; mem_level->UpHyst = 0; @@ -1304,7 +1351,6 @@ static int polaris10_populate_single_memory_level(struct pp_hwmgr *hwmgr, mem_level->VoltageDownHyst = 0; mem_level->ActivityLevel = (uint16_t)data->mclk_activity_target; mem_level->StutterEnable = false; - mem_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW; data->display_timing.num_existing_displays = info.display_count; @@ -1358,12 +1404,12 @@ static int 
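/*
 * polaris10_clear_voting_clients pulses the SCLK and busy counters and
 * zeroes all eight CG_FREQ_TRAN_VOTING_n registers so that no client can
 * veto a frequency transition while DPM is being shut off. The unrolled
 * writes would collapse to a loop if the ixCG_FREQ_TRAN_VOTING_n indirect
 * offsets are consecutive, which is an assumption here:
 *
 *	for (i = 0; i < 8; i++)
 *		cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
 *				       ixCG_FREQ_TRAN_VOTING_0 + i, 0);
 */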
polaris10_populate_all_memory_levels(struct pp_hwmgr *hwmgr) return result; } - /* in order to prevent MC activity from stutter mode to push DPM up. + /* In order to prevent MC activity from stutter mode to push DPM up, * the UVD change complements this by putting the MCLK in - * a higher state by default such that we are not effected by + * a higher state by default such that we are not affected by * up threshold or and MCLK DPM latency. */ - levels[0].ActivityLevel = (uint16_t)data->mclk_dpm0_activity_target; + levels[0].ActivityLevel = 0x1f; CONVERT_FROM_HOST_TO_SMC_US(levels[0].ActivityLevel); data->smc_state_table.MemoryDpmLevelCount = @@ -1761,12 +1807,9 @@ static int polaris10_populate_smc_initailial_state(struct pp_hwmgr *hwmgr) static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) { - uint32_t ro, efuse, efuse2, clock_freq, volt_without_cks, - volt_with_cks, value; - uint16_t clock_freq_u16; + uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min; struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - uint8_t type, i, j, cks_setting, stretch_amount, stretch_amount2, - volt_offset = 0; + uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = @@ -1778,50 +1821,38 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) * if the part is SS or FF. if RO >= 1660MHz, part is FF. */ efuse = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_EFUSE_0 + (146 * 4)); - efuse2 = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixSMU_EFUSE_0 + (148 * 4)); + ixSMU_EFUSE_0 + (67 * 4)); efuse &= 0xFF000000; efuse = efuse >> 24; - efuse2 &= 0xF; - - if (efuse2 == 1) - ro = (2300 - 1350) * efuse / 255 + 1350; - else - ro = (2500 - 1000) * efuse / 255 + 1000; - if (ro >= 1660) - type = 0; - else - type = 1; + if (hwmgr->chip_id == CHIP_POLARIS10) { + min = 1000; + max = 2300; + } else { + min = 1100; + max = 2100; + } - /* Populate Stretch amount */ - data->smc_state_table.ClockStretcherAmount = stretch_amount; + ro = efuse * (max -min)/255 + min; /* Populate Sclk_CKS_masterEn0_7 and Sclk_voltageOffset */ for (i = 0; i < sclk_table->count; i++) { data->smc_state_table.Sclk_CKS_masterEn0_7 |= sclk_table->entries[i].cks_enable << i; - volt_without_cks = (uint32_t)((14041 * - (sclk_table->entries[i].clk/100) / 10000 + 3571 + 75 - ro) * 1000 / - (4026 - (13924 * (sclk_table->entries[i].clk/100) / 10000))); - volt_with_cks = (uint32_t)((13946 * - (sclk_table->entries[i].clk/100) / 10000 + 3320 + 45 - ro) * 1000 / - (3664 - (11454 * (sclk_table->entries[i].clk/100) / 10000))); + + volt_without_cks = (uint32_t)(((ro - 40) * 1000 - 2753594 - sclk_table->entries[i].clk/100 * 136418 /1000) / \ + (sclk_table->entries[i].clk/100 * 1132925 /10000 - 242418)/100); + + volt_with_cks = (uint32_t)((ro * 1000 -2396351 - sclk_table->entries[i].clk/100 * 329021/1000) / \ + (sclk_table->entries[i].clk/10000 * 649434 /1000 - 18005)/10); + if (volt_without_cks >= volt_with_cks) volt_offset = (uint8_t)(((volt_without_cks - volt_with_cks + sclk_table->entries[i].cks_voffset) * 100 / 625) + 1); + data->smc_state_table.Sclk_voltageOffset[i] = volt_offset; } - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - STRETCH_ENABLE, 0x0); - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - masterReset, 0x1); - /* 
PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, staticEnable, 0x1); */ - PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, PWR_CKS_ENABLE, - masterReset, 0x0); - /* Populate CKS Lookup Table */ if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5) stretch_amount2 = 0; @@ -1835,69 +1866,6 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr) return -EINVAL); } - value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixPWR_CKS_CNTL); - value &= 0xFFC2FF87; - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq = - polaris10_clock_stretcher_lookup_table[stretch_amount2][0]; - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq = - polaris10_clock_stretcher_lookup_table[stretch_amount2][1]; - clock_freq_u16 = (uint16_t)(PP_SMC_TO_HOST_UL(data->smc_state_table. - GraphicsLevel[data->smc_state_table.GraphicsDpmLevelCount - 1].SclkSetting.SclkFrequency) / 100); - if (polaris10_clock_stretcher_lookup_table[stretch_amount2][0] < clock_freq_u16 - && polaris10_clock_stretcher_lookup_table[stretch_amount2][1] > clock_freq_u16) { - /* Program PWR_CKS_CNTL. CKS_USE_FOR_LOW_FREQ */ - value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 16; - /* Program PWR_CKS_CNTL. CKS_LDO_REFSEL */ - value |= (polaris10_clock_stretcher_lookup_table[stretch_amount2][2]) << 18; - /* Program PWR_CKS_CNTL. CKS_STRETCH_AMOUNT */ - value |= (polaris10_clock_stretch_amount_conversion - [polaris10_clock_stretcher_lookup_table[stretch_amount2][3]] - [stretch_amount]) << 3; - } - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].minFreq); - CONVERT_FROM_HOST_TO_SMC_US(data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].maxFreq); - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting = - polaris10_clock_stretcher_lookup_table[stretch_amount2][2] & 0x7F; - data->smc_state_table.CKS_LOOKUPTable.CKS_LOOKUPTableEntry[0].setting |= - (polaris10_clock_stretcher_lookup_table[stretch_amount2][3]) << 7; - - cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, - ixPWR_CKS_CNTL, value); - - /* Populate DDT Lookup Table */ - for (i = 0; i < 4; i++) { - /* Assign the minimum and maximum VID stored - * in the last row of Clock Stretcher Voltage Table. - */ - data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].minVID = - (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][2]; - data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].maxVID = - (uint8_t) polaris10_clock_stretcher_ddt_table[type][i][3]; - /* Loop through each SCLK and check the frequency - * to see if it lies within the frequency for clock stretcher. - */ - for (j = 0; j < data->smc_state_table.GraphicsDpmLevelCount; j++) { - cks_setting = 0; - clock_freq = PP_SMC_TO_HOST_UL( - data->smc_state_table.GraphicsLevel[j].SclkSetting.SclkFrequency); - /* Check the allowed frequency against the sclk level[j]. - * Sclk's endianness has already been converted, - * and it's in 10Khz unit, - * as opposed to Data table, which is in Mhz unit. 
- */ - if (clock_freq >= (polaris10_clock_stretcher_ddt_table[type][i][0]) * 100) { - cks_setting |= 0x2; - if (clock_freq < (polaris10_clock_stretcher_ddt_table[type][i][1]) * 100) - cks_setting |= 0x1; - } - data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting - |= cks_setting << (j * 2); - } - CONVERT_FROM_HOST_TO_SMC_US( - data->smc_state_table.ClockStretcherDataTable.ClockStretcherDataTableEntry[i].setting); - } - value = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL); value &= 0xFFFFFFFE; cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixPWR_CKS_CNTL, value); @@ -1945,9 +1913,8 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, if (POLARIS10_VOLTAGE_CONTROL_BY_SVID2 == data->mvdd_control) { config = VR_SVI2_PLANE_2; table->VRConfig |= (config << VRCONF_MVDD_SHIFT); - } else if (POLARIS10_VOLTAGE_CONTROL_BY_GPIO == data->mvdd_control) { - config = VR_SMIO_PATTERN_2; - table->VRConfig |= (config << VRCONF_MVDD_SHIFT); + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, data->soft_regs_start + + offsetof(SMU74_SoftRegisters, AllowMvddSwitch), 0x1); } else { config = VR_STATIC_VOLTAGE; table->VRConfig |= (config << VRCONF_MVDD_SHIFT); @@ -1956,6 +1923,90 @@ static int polaris10_populate_vr_config(struct pp_hwmgr *hwmgr, return 0; } + +int polaris10_populate_avfs_parameters(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + SMU74_Discrete_DpmTable *table = &(data->smc_state_table); + int result = 0; + struct pp_atom_ctrl__avfs_parameters avfs_params = {0}; + AVFS_meanNsigma_t AVFS_meanNsigma = { {0} }; + AVFS_Sclk_Offset_t AVFS_SclkOffset = { {0} }; + uint32_t tmp, i; + struct pp_smumgr *smumgr = hwmgr->smumgr; + struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + + struct phm_ppt_v1_information *table_info = + (struct phm_ppt_v1_information *)hwmgr->pptable; + struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table = + table_info->vdd_dep_on_sclk; + + + if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) + return result; + + result = atomctrl_get_avfs_information(hwmgr, &avfs_params); + + if (0 == result) { + table->BTCGB_VDROOP_TABLE[0].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a0); + table->BTCGB_VDROOP_TABLE[0].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a1); + table->BTCGB_VDROOP_TABLE[0].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSON_a2); + table->BTCGB_VDROOP_TABLE[1].a0 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a0); + table->BTCGB_VDROOP_TABLE[1].a1 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a1); + table->BTCGB_VDROOP_TABLE[1].a2 = PP_HOST_TO_SMC_UL(avfs_params.ulGB_VDROOP_TABLE_CKSOFF_a2); + table->AVFSGB_VDROOP_TABLE[0].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_m1); + table->AVFSGB_VDROOP_TABLE[0].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSON_m2); + table->AVFSGB_VDROOP_TABLE[0].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSON_b); + table->AVFSGB_VDROOP_TABLE[0].m1_shift = 24; + table->AVFSGB_VDROOP_TABLE[0].m2_shift = 12; + table->AVFSGB_VDROOP_TABLE[1].m1 = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_m1); + table->AVFSGB_VDROOP_TABLE[1].m2 = PP_HOST_TO_SMC_US(avfs_params.usAVFSGB_FUSE_TABLE_CKSOFF_m2); + table->AVFSGB_VDROOP_TABLE[1].b = PP_HOST_TO_SMC_UL(avfs_params.ulAVFSGB_FUSE_TABLE_CKSOFF_b); + table->AVFSGB_VDROOP_TABLE[1].m1_shift = 24; + 
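/*
 * The two table uploads just below follow the usual SMU7 pattern for data
 * whose location is owned by the firmware: read a pointer out of the
 * firmware header in SMC SRAM, then copy the host structure to that
 * address. As written the hunk does not check either return value before
 * copying; a more defensive sketch of the same pattern:
 *
 *	result = polaris10_read_smc_sram_dword(smumgr,
 *			SMU7_FIRMWARE_HEADER_LOCATION +
 *			offsetof(SMU74_Firmware_Header, AvfsMeanNSigma),
 *			&tmp, data->sram_end);
 *	if (result == 0)
 *		result = polaris10_copy_bytes_to_smc(smumgr, tmp,
 *				(uint8_t *)&AVFS_meanNsigma,
 *				sizeof(AVFS_meanNsigma_t), data->sram_end);
 */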
table->AVFSGB_VDROOP_TABLE[1].m2_shift = 12; + table->MaxVoltage = PP_HOST_TO_SMC_US(avfs_params.usMaxVoltage_0_25mv); + AVFS_meanNsigma.Aconstant[0] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant0); + AVFS_meanNsigma.Aconstant[1] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant1); + AVFS_meanNsigma.Aconstant[2] = PP_HOST_TO_SMC_UL(avfs_params.ulAVFS_meanNsigma_Acontant2); + AVFS_meanNsigma.DC_tol_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_DC_tol_sigma); + AVFS_meanNsigma.Platform_mean = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_mean); + AVFS_meanNsigma.PSM_Age_CompFactor = PP_HOST_TO_SMC_US(avfs_params.usPSM_Age_ComFactor); + AVFS_meanNsigma.Platform_sigma = PP_HOST_TO_SMC_US(avfs_params.usAVFS_meanNsigma_Platform_sigma); + + for (i = 0; i < NUM_VFT_COLUMNS; i++) { + AVFS_meanNsigma.Static_Voltage_Offset[i] = (uint8_t)(sclk_table->entries[i].cks_voffset * 100 / 625); + AVFS_SclkOffset.Sclk_Offset[i] = PP_HOST_TO_SMC_US((uint16_t)(sclk_table->entries[i].sclk_offset) / 100); + } + + result = polaris10_read_smc_sram_dword(smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsMeanNSigma), + &tmp, data->sram_end); + + polaris10_copy_bytes_to_smc(smumgr, + tmp, + (uint8_t *)&AVFS_meanNsigma, + sizeof(AVFS_meanNsigma_t), + data->sram_end); + + result = polaris10_read_smc_sram_dword(smumgr, + SMU7_FIRMWARE_HEADER_LOCATION + offsetof(SMU74_Firmware_Header, AvfsSclkOffsetTable), + &tmp, data->sram_end); + polaris10_copy_bytes_to_smc(smumgr, + tmp, + (uint8_t *)&AVFS_SclkOffset, + sizeof(AVFS_Sclk_Offset_t), + data->sram_end); + + data->avfs_vdroop_override_setting = (avfs_params.ucEnableGB_VDROOP_TABLE_CKSON << BTCGB0_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_VDROOP_TABLE_CKSOFF << BTCGB1_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_FUSE_TABLE_CKSON << AVFSGB0_Vdroop_Enable_SHIFT) | + (avfs_params.ucEnableGB_FUSE_TABLE_CKSOFF << AVFSGB1_Vdroop_Enable_SHIFT); + data->apply_avfs_cks_off_voltage = (avfs_params.ucEnableApplyAVFS_CKS_OFF_Voltage == 1) ? 
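/*
 * avfs_vdroop_override_setting packs the four per-table enable flags from
 * the ATOM profiling data into a single word that is later passed as the
 * PPSMC_MSG_SetGBDroopSettings parameter (see the thermal hunk near the
 * end of this patch). Assuming each *_Vdroop_Enable_SHIFT names one bit:
 *
 *	BTCGB0  - GB_VDROOP_TABLE, clock stretcher on
 *	BTCGB1  - GB_VDROOP_TABLE, clock stretcher off
 *	AVFSGB0 - GB_FUSE_TABLE,   clock stretcher on
 *	AVFSGB1 - GB_FUSE_TABLE,   clock stretcher off
 */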
true : false; + } + return result; +} + + /** * Initializes the SMC table and uploads it * @@ -2056,6 +2107,10 @@ static int polaris10_init_smc_table(struct pp_hwmgr *hwmgr) "Failed to populate Clock Stretcher Data Table!", return result); } + + result = polaris10_populate_avfs_parameters(hwmgr); + PP_ASSERT_WITH_CODE(0 == result, "Failed to populate AVFS Parameters!", return result;); + table->CurrSclkPllRange = 0xff; table->GraphicsVoltageChangeEnable = 1; table->GraphicsThermThrottleEnable = 1; @@ -2229,6 +2284,17 @@ static int polaris10_enable_ulv(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_disable_ulv(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_ulv_parm *ulv = &(data->ulv); + + if (ulv->ulv_supported) + return smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_DisableULV); + + return 0; +} + static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) { if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -2249,9 +2315,27 @@ static int polaris10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr) +{ + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_SclkDeepSleep)) { + if (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MASTER_DeepSleep_OFF)) { + PP_ASSERT_WITH_CODE(false, + "Attempt to disable Master Deep Sleep switch failed!", + return -1); + } + } + + return 0; +} + static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + uint32_t soft_register_value = 0; + uint32_t handshake_disables_offset = data->soft_regs_start + + offsetof(SMU74_SoftRegisters, HandshakeDisables); /* enable SCLK dpm */ if (!data->sclk_dpm_key_disabled) @@ -2262,6 +2346,12 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) /* enable MCLK dpm */ if (0 == data->mclk_dpm_key_disabled) { +/* Disable UVD - SMU handshake for MCLK. 
*/ + soft_register_value = cgs_read_ind_register(hwmgr->device, + CGS_IND_REG__SMC, handshake_disables_offset); + soft_register_value |= SMU7_UVD_MCLK_HANDSHAKE_DISABLE; + cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, + handshake_disables_offset, soft_register_value); PP_ASSERT_WITH_CODE( (0 == smum_send_msg_to_smc(hwmgr->smumgr, @@ -2269,7 +2359,6 @@ static int polaris10_enable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) "Failed to enable MCLK DPM during DPM Start Function!", return -1); - PHM_WRITE_FIELD(hwmgr->device, MC_SEQ_CNTL_3, CAC_EN, 0x1); cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixLCAC_MC0_CNTL, 0x5); @@ -2338,6 +2427,58 @@ static int polaris10_start_dpm(struct pp_hwmgr *hwmgr) return 0; } +static int polaris10_disable_sclk_mclk_dpm(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + + /* disable SCLK dpm */ + if (!data->sclk_dpm_key_disabled) + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_DPM_Disable) == 0), + "Failed to disable SCLK DPM!", + return -1); + + /* disable MCLK dpm */ + if (!data->mclk_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_MCLKDPM_Disable) == 0), + "Failed to disable MCLK DPM!", + return -1); + } + + return 0; +} + +static int polaris10_stop_dpm(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + + /* disable general power management */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, GENERAL_PWRMGT, + GLOBAL_PWRMGT_EN, 0); + /* disable sclk deep sleep */ + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, SCLK_PWRMGT_CNTL, + DYNAMIC_PM_EN, 0); + + /* disable PCIE dpm */ + if (!data->pcie_dpm_key_disabled) { + PP_ASSERT_WITH_CODE( + (smum_send_msg_to_smc(hwmgr->smumgr, + PPSMC_MSG_PCIeDPM_Disable) == 0), + "Failed to disable pcie DPM during DPM Stop Function!", + return -1); + } + + if (polaris10_disable_sclk_mclk_dpm(hwmgr)) { + printk(KERN_ERR "Failed to disable Sclk DPM and Mclk DPM!"); + return -1; + } + + return 0; +} + static void polaris10_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources) { bool protection; @@ -2395,6 +2536,23 @@ static int polaris10_enable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) return polaris10_enable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); } +static int polaris10_disable_auto_throttle_source(struct pp_hwmgr *hwmgr, + PHM_AutoThrottleSource source) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + + if (data->active_auto_throttle_sources & (1 << source)) { + data->active_auto_throttle_sources &= ~(1 << source); + polaris10_set_dpm_event_sources(hwmgr, data->active_auto_throttle_sources); + } + return 0; +} + +static int polaris10_disable_thermal_auto_throttle(struct pp_hwmgr *hwmgr) +{ + return polaris10_disable_auto_throttle_source(hwmgr, PHM_AutoThrottleSource_Thermal); +} + int polaris10_pcie_performance_request(struct pp_hwmgr *hwmgr) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -2516,8 +2674,60 @@ int polaris10_enable_dpm_tasks(struct pp_hwmgr *hwmgr) int polaris10_disable_dpm_tasks(struct pp_hwmgr *hwmgr) { + int tmp_result, result = 0; - return 0; + tmp_result = (polaris10_is_dpm_running(hwmgr)) ? 
0 : -1; + PP_ASSERT_WITH_CODE(tmp_result == 0, + "DPM is not running right now, no need to disable DPM!", + return 0); + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ThermalController)) + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, 1); + + tmp_result = polaris10_disable_power_containment(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable power containment!", result = tmp_result); + + tmp_result = polaris10_disable_smc_cac(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable SMC CAC!", result = tmp_result); + + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + CG_SPLL_SPREAD_SPECTRUM, SSEN, 0); + PHM_WRITE_INDIRECT_FIELD(hwmgr->device, CGS_IND_REG__SMC, + GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, 0); + + tmp_result = polaris10_disable_thermal_auto_throttle(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable thermal auto throttle!", result = tmp_result); + + tmp_result = polaris10_stop_dpm(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to stop DPM!", result = tmp_result); + + tmp_result = polaris10_disable_deep_sleep_master_switch(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable deep sleep master switch!", result = tmp_result); + + tmp_result = polaris10_disable_ulv(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to disable ULV!", result = tmp_result); + + tmp_result = polaris10_clear_voting_clients(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to clear voting clients!", result = tmp_result); + + tmp_result = polaris10_reset_to_default(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to reset to default!", result = tmp_result); + + tmp_result = polaris10_force_switch_to_arbf0(hwmgr); + PP_ASSERT_WITH_CODE((tmp_result == 0), + "Failed to force to switch arbf0!", result = tmp_result); + + return result; } int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr) @@ -2528,13 +2738,6 @@ int polaris10_reset_asic_tasks(struct pp_hwmgr *hwmgr) int polaris10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (data->soft_pp_table) { - kfree(data->soft_pp_table); - data->soft_pp_table = NULL; - } - return phm_hwmgr_backend_fini(hwmgr); } @@ -2590,8 +2793,13 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) phm_cap_unset(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping); - phm_cap_set(hwmgr->platform_descriptor.platformCaps, - PHM_PlatformCaps_PowerContainment); + if (hwmgr->powercontainment_enabled) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + else + phm_cap_unset(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment); + phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_CAC); @@ -2606,6 +2814,7 @@ int polaris10_set_features_platform_caps(struct pp_hwmgr *hwmgr) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_FanSpeedInTableIsRPM); + if (hwmgr->chip_id == CHIP_POLARIS11) phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SPLLShutdownSupport); @@ -2662,12 +2871,12 @@ static int polaris10_get_evv_voltages(struct pp_hwmgr *hwmgr) } } - - PP_ASSERT_WITH_CODE(0 == atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, - VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc), - "Error retrieving EVV voltage value!", - continue); - + if 
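/*
 * Open-coding this EVV check instead of wrapping "continue" in
 * PP_ASSERT_WITH_CODE is more than style. If the macro expands to a
 * do { ... } while (0) block, a "continue" passed in as the recovery code
 * terminates the macro's own loop rather than advancing the enclosing for
 * loop, so the entry is not actually skipped. Reduced illustration of the
 * pitfall, with hypothetical ok() and use():
 *
 *	for (i = 0; i < n; i++) {
 *		do { if (!ok(i)) continue; } while (0);
 *		use(i);		still reached when ok(i) is false
 *	}
 */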
(atomctrl_get_voltage_evv_on_sclk_ai(hwmgr, + VOLTAGE_TYPE_VDDC, + sclk, vv_id, &vddc) != 0) { + printk(KERN_WARNING "failed to retrieving EVV voltage!\n"); + continue; + } /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */ PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0), @@ -2898,13 +3107,19 @@ static int polaris10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr) int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_hwmgr *data; struct pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; uint32_t temp_reg; int result; struct phm_ppt_v1_information *table_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); + data = kzalloc(sizeof(struct polaris10_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + data->dll_default_on = false; data->sram_end = SMC_RAM_END; data->mclk_dpm0_activity_target = 0xa; @@ -2938,6 +3153,11 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) data->vddci_control = POLARIS10_VOLTAGE_CONTROL_NONE; data->mvdd_control = POLARIS10_VOLTAGE_CONTROL_NONE; + data->enable_tdc_limit_feature = true; + data->enable_pkg_pwr_tracking_feature = true; + data->force_pcie_gen = PP_PCIEGenInvalid; + data->mclk_stutter_mode_threshold = 40000; + if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) data->voltage_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; @@ -2962,6 +3182,10 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) data->vddci_control = POLARIS10_VOLTAGE_CONTROL_BY_SVID2; } + if (table_info->cac_dtp_table->usClockStretchAmount != 0) + phm_cap_set(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_ClockStretcher); + polaris10_set_features_platform_caps(hwmgr); polaris10_init_dpm_defaults(hwmgr); @@ -3068,7 +3292,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_gen_cap = 0x30007; + data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; else data->pcie_gen_cap = (uint32_t)sys_info.value; if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) @@ -3077,7 +3301,7 @@ int polaris10_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_lane_cap = 0x2f0000; + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; else data->pcie_lane_cap = (uint32_t)sys_info.value; @@ -3520,10 +3744,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, ATOM_Tonga_State *state_entry = (ATOM_Tonga_State *)state; ATOM_Tonga_POWERPLAYTABLE *powerplay_table = (ATOM_Tonga_POWERPLAYTABLE *)pp_table; - ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = - (ATOM_Tonga_SCLK_Dependency_Table *) + PPTable_Generic_SubTable_Header *sclk_dep_table = + (PPTable_Generic_SubTable_Header *) (((unsigned long)powerplay_table) + le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); + ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = (ATOM_Tonga_MCLK_Dependency_Table *) (((unsigned long)powerplay_table) + @@ -3575,7 +3800,11 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, /* Performance levels are arranged from low to high. 
*/ performance_level->memory_clock = mclk_dep_table->entries [state_entry->ucMemoryClockIndexLow].ulMclk; - performance_level->engine_clock = sclk_dep_table->entries + if (sclk_dep_table->ucRevId == 0) + performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries + [state_entry->ucEngineClockIndexLow].ulSclk; + else if (sclk_dep_table->ucRevId == 1) + performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries [state_entry->ucEngineClockIndexLow].ulSclk; performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, state_entry->ucPCIEGenLow); @@ -3586,8 +3815,14 @@ static int polaris10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr, [polaris10_power_state->performance_level_count++]); performance_level->memory_clock = mclk_dep_table->entries [state_entry->ucMemoryClockIndexHigh].ulMclk; - performance_level->engine_clock = sclk_dep_table->entries + + if (sclk_dep_table->ucRevId == 0) + performance_level->engine_clock = ((ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table)->entries + [state_entry->ucEngineClockIndexHigh].ulSclk; + else if (sclk_dep_table->ucRevId == 1) + performance_level->engine_clock = ((ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table)->entries [state_entry->ucEngineClockIndexHigh].ulSclk; + performance_level->pcie_gen = get_pcie_gen_support(data->pcie_gen_cap, state_entry->ucPCIEGenHigh); performance_level->pcie_lane = get_pcie_lane_support(data->pcie_lane_cap, @@ -3645,7 +3880,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr, switch (state->classification.ui_label) { case PP_StateUILabel_Performance: data->use_pcie_performance_levels = true; - for (i = 0; i < ps->performance_level_count; i++) { if (data->pcie_gen_performance.max < ps->performance_levels[i].pcie_gen) @@ -3661,7 +3895,6 @@ static int polaris10_get_pp_table_entry(struct pp_hwmgr *hwmgr, ps->performance_levels[i].pcie_lane) data->pcie_lane_performance.max = ps->performance_levels[i].pcie_lane; - if (data->pcie_lane_performance.min > ps->performance_levels[i].pcie_lane) data->pcie_lane_performance.min = @@ -4187,12 +4420,9 @@ int polaris10_update_samu_dpm(struct pp_hwmgr *hwmgr, bool bgate) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); uint32_t mm_boot_level_offset, mm_boot_level_value; - struct phm_ppt_v1_information *table_info = - (struct phm_ppt_v1_information *)(hwmgr->pptable); if (!bgate) { - data->smc_state_table.SamuBootLevel = - (uint8_t) (table_info->mm_dep_table->count - 1); + data->smc_state_table.SamuBootLevel = 0; mm_boot_level_offset = data->dpm_table_start + offsetof(SMU74_Discrete_DpmTable, SamuBootLevel); mm_boot_level_offset /= 4; @@ -4721,42 +4951,6 @@ int polaris10_setup_asic_task(struct pp_hwmgr *hwmgr) return result; } -static int polaris10_get_pp_table(struct pp_hwmgr *hwmgr, char **table) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size, - GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - *table = (char *)&data->soft_pp_table; - - return hwmgr->soft_pp_table_size; -} - -static int polaris10_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) -{ - struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; 
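/*
 * The parsing above now treats the SCLK dependency table as a generic
 * subtable and dispatches on its revision: ucRevId 0 keeps the Tonga
 * layout, ucRevId 1 selects the newer Polaris layout whose entries also
 * carry the sclk_offset consumed by AVFS. Shape of the dispatch:
 *
 *	PPTable_Generic_SubTable_Header *hdr = ...;
 *	uint32_t sclk = 0;
 *
 *	if (hdr->ucRevId == 0)
 *		sclk = ((ATOM_Tonga_SCLK_Dependency_Table *)hdr)
 *				->entries[idx].ulSclk;
 *	else if (hdr->ucRevId == 1)
 *		sclk = ((ATOM_Polaris_SCLK_Dependency_Table *)hdr)
 *				->entries[idx].ulSclk;
 */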
- } - - memcpy(data->soft_pp_table, buf, size); - - hwmgr->soft_pp_table = data->soft_pp_table; - - /* TODO: re-init powerplay to implement modified pptable */ - - return 0; -} - static int polaris10_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { @@ -4899,6 +5093,89 @@ static int polaris10_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } +static int polaris10_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct polaris10_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int polaris10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct polaris10_power_state *polaris10_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); + + polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int polaris10_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct polaris10_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int polaris10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + struct polaris10_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct polaris10_power_state *polaris10_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + polaris10_ps = cast_phw_polaris10_power_state(&ps->hardware); + + polaris10_ps->performance_levels[polaris10_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} static const struct pp_hwmgr_func polaris10_hwmgr_funcs = { .backend_init = &polaris10_hwmgr_backend_init, .backend_fini = &polaris10_hwmgr_backend_fini, @@ -4937,22 +5214,17 @@ static const struct pp_hwmgr_func polaris10_hwmgr_funcs = { .check_states_equal = polaris10_check_states_equal, .set_fan_control_mode = polaris10_set_fan_control_mode, .get_fan_control_mode = polaris10_get_fan_control_mode, - .get_pp_table = polaris10_get_pp_table, - .set_pp_table = polaris10_set_pp_table, 
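/*
 * The overdrive setters above clamp the request to 20% and rewrite only
 * the top performance level of the currently requested power state,
 * leaving the rest of the DPM table untouched:
 *
 *	new_top = golden_top + golden_top * value / 100;
 *
 * e.g. a hypothetical golden_top of 1266 MHz with value = 10 yields
 * 1392 MHz (integer arithmetic, truncated). The getters report the same
 * quantity in reverse, as a percentage above the golden table.
 */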
.force_clock_level = polaris10_force_clock_level, .print_clock_levels = polaris10_print_clock_levels, .enable_per_cu_power_gating = polaris10_phm_enable_per_cu_power_gating, + .get_sclk_od = polaris10_get_sclk_od, + .set_sclk_od = polaris10_set_sclk_od, + .get_mclk_od = polaris10_get_mclk_od, + .set_mclk_od = polaris10_set_mclk_od, }; int polaris10_hwmgr_init(struct pp_hwmgr *hwmgr) { - struct polaris10_hwmgr *data; - - data = kzalloc (sizeof(struct polaris10_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - hwmgr->backend = data; hwmgr->hwmgr_func = &polaris10_hwmgr_funcs; hwmgr->pptable_func = &tonga_pptable_funcs; pp_polaris10_thermal_initialize(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h index beedf35cbfa6..fd38b0d7a3c2 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_hwmgr.h @@ -309,9 +309,8 @@ struct polaris10_hwmgr { uint32_t up_hyst; uint32_t disable_dpm_mask; bool apply_optimized_settings; - - /* soft pptable for re-uploading into smu */ - void *soft_pp_table; + uint32_t avfs_vdroop_override_setting; + bool apply_avfs_cks_off_voltage; }; /* To convert to Q8.8 format for firmware */ diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c index 0b99ab3ba0c5..5620e268b553 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.c @@ -286,7 +286,7 @@ int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr) if (polaris10_copy_bytes_to_smc(hwmgr->smumgr, pm_fuse_table_offset, (uint8_t *)&data->power_tune_table, - sizeof(struct SMU74_Discrete_PmFuses), data->sram_end)) + (sizeof(struct SMU74_Discrete_PmFuses) - 92), data->sram_end)) PP_ASSERT_WITH_CODE(false, "Attempt to download PmFuseTable Failed!", return -EINVAL); @@ -312,6 +312,23 @@ int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr) return result; } +int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_CAC) && data->cac_enabled) { + int smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_DisableCac)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable CAC in SMC.", result = -1); + + data->cac_enabled = false; + } + return result; +} + int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n) { struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); @@ -373,6 +390,48 @@ int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr) return result; } +int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr) +{ + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); + int result = 0; + + if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, + PHM_PlatformCaps_PowerContainment) && + data->power_containment_features) { + int smc_result; + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_TDCLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_TDCLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable TDCLimit in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_DTE) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + 
(uint16_t)(PPSMC_MSG_DisableDTE)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable DTE in SMC.", + result = smc_result); + } + + if (data->power_containment_features & + POWERCONTAINMENT_FEATURE_PkgPwrLimit) { + smc_result = smum_send_msg_to_smc(hwmgr->smumgr, + (uint16_t)(PPSMC_MSG_PkgPwrLimitDisable)); + PP_ASSERT_WITH_CODE((smc_result == 0), + "Failed to disable PkgPwrTracking in SMC.", + result = smc_result); + } + data->power_containment_features = 0; + } + + return result; +} + int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr) { struct phm_ppt_v1_information *table_info = diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h index 68bc1cb6d40c..d492d6d28867 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_powertune.h @@ -31,6 +31,19 @@ enum polaris10_pt_config_reg_type { POLARIS10_CONFIGREG_MAX }; +#define DIDT_SQ_CTRL0__UNUSED_0_MASK 0xfffc0000 +#define DIDT_SQ_CTRL0__UNUSED_0__SHIFT 0x12 +#define DIDT_TD_CTRL0__UNUSED_0_MASK 0xfffc0000 +#define DIDT_TD_CTRL0__UNUSED_0__SHIFT 0x12 +#define DIDT_TCP_CTRL0__UNUSED_0_MASK 0xfffc0000 +#define DIDT_TCP_CTRL0__UNUSED_0__SHIFT 0x12 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 +#define DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e +#define DIDT_TD_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 +#define DIDT_TD_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e +#define DIDT_TCP_TUNING_CTRL__UNUSED_0_MASK 0xc0000000 +#define DIDT_TCP_TUNING_CTRL__UNUSED_0__SHIFT 0x0000001e + /* PowerContainment Features */ #define POWERCONTAINMENT_FEATURE_DTE 0x00000001 #define POWERCONTAINMENT_FEATURE_TDCLimit 0x00000002 @@ -62,7 +75,9 @@ void polaris10_initialize_power_tune_defaults(struct pp_hwmgr *hwmgr); int polaris10_populate_bapm_parameters_in_dpm_table(struct pp_hwmgr *hwmgr); int polaris10_populate_pm_fuses(struct pp_hwmgr *hwmgr); int polaris10_enable_smc_cac(struct pp_hwmgr *hwmgr); +int polaris10_disable_smc_cac(struct pp_hwmgr *hwmgr); int polaris10_enable_power_containment(struct pp_hwmgr *hwmgr); +int polaris10_disable_power_containment(struct pp_hwmgr *hwmgr); int polaris10_set_power_limit(struct pp_hwmgr *hwmgr, uint32_t n); int polaris10_power_control_set_level(struct pp_hwmgr *hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c index aba167f7d167..b206632d4650 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/polaris10_thermal.c @@ -625,10 +625,14 @@ static int tf_polaris10_thermal_avfs_enable(struct pp_hwmgr *hwmgr, int ret; struct pp_smumgr *smumgr = (struct pp_smumgr *)(hwmgr->smumgr); struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(smumgr->backend); + struct polaris10_hwmgr *data = (struct polaris10_hwmgr *)(hwmgr->backend); - if (smu_data->avfs.avfs_btc_status != AVFS_BTC_ENABLEAVFS) + if (smu_data->avfs.avfs_btc_status == AVFS_BTC_NOTSUPPORTED) return 0; + ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, + PPSMC_MSG_SetGBDroopSettings, data->avfs_vdroop_override_setting); + ret = (smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) == 0) ? 
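/*
 * tf_polaris10_thermal_avfs_enable now bails out only when AVFS is truly
 * unsupported, rather than whenever BTC has not reached ENABLEAVFS, and it
 * pushes the fused vdroop override down first so the SMC has its GB droop
 * settings in place before AVFS is switched on:
 *
 *	smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
 *			PPSMC_MSG_SetGBDroopSettings,
 *			data->avfs_vdroop_override_setting);
 *	ret = smum_send_msg_to_smc(smumgr, PPSMC_MSG_EnableAvfs) ? -1 : 0;
 *
 * Note that the first send's return value is not checked as written.
 */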
0 : -1; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c index 58742e0d1492..a3c38bbd1e94 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/pp_acpi.c @@ -44,6 +44,20 @@ bool acpi_atcs_functions_supported(void *device, uint32_t index) return result == 0 ? (output_buf.function_bits & (1 << (index - 1))) != 0 : false; } +bool acpi_atcs_notify_pcie_device_ready(void *device) +{ + int32_t temp_buffer = 1; + + return cgs_call_acpi_method(device, CGS_ACPI_METHOD_ATCS, + ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION, + &temp_buffer, + NULL, + 0, + sizeof(temp_buffer), + 0); +} + + int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) { struct atcs_pref_req_input atcs_input; @@ -52,7 +66,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) int result; struct cgs_system_info info = {0}; - if (!acpi_atcs_functions_supported(device, ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST)) + if( 0 != acpi_atcs_notify_pcie_device_ready(device)) return -EINVAL; info.size = sizeof(struct cgs_system_info); @@ -77,7 +91,7 @@ int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise) ATCS_FUNCTION_PCIE_PERFORMANCE_REQUEST, &atcs_input, &atcs_output, - 0, + 1, sizeof(atcs_input), sizeof(atcs_output)); if (result != 0) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c index da9f5f1b6dc2..5d70e2c47faf 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.c @@ -552,13 +552,13 @@ static bool atomctrl_lookup_gpio_pin( pin_assignment->ucGpioPinBitShift; gpio_pin_assignment->us_gpio_pin_aindex = le16_to_cpu(pin_assignment->usGpioPin_AIndex); - return false; + return true; } offset += offsetof(ATOM_GPIO_PIN_ASSIGNMENT, ucGPIO_ID) + 1; } - return true; + return false; } /** @@ -1302,3 +1302,46 @@ int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctr return 0; } + +int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param) +{ + ATOM_ASIC_PROFILING_INFO_V3_6 *profile = NULL; + + if (param == NULL) + return -EINVAL; + + profile = (ATOM_ASIC_PROFILING_INFO_V3_6 *) + cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, ASIC_ProfilingInfo), + NULL, NULL, NULL); + if (!profile) + return -1; + + param->ulAVFS_meanNsigma_Acontant0 = profile->ulAVFS_meanNsigma_Acontant0; + param->ulAVFS_meanNsigma_Acontant1 = profile->ulAVFS_meanNsigma_Acontant1; + param->ulAVFS_meanNsigma_Acontant2 = profile->ulAVFS_meanNsigma_Acontant2; + param->usAVFS_meanNsigma_DC_tol_sigma = profile->usAVFS_meanNsigma_DC_tol_sigma; + param->usAVFS_meanNsigma_Platform_mean = profile->usAVFS_meanNsigma_Platform_mean; + param->usAVFS_meanNsigma_Platform_sigma = profile->usAVFS_meanNsigma_Platform_sigma; + param->ulGB_VDROOP_TABLE_CKSOFF_a0 = profile->ulGB_VDROOP_TABLE_CKSOFF_a0; + param->ulGB_VDROOP_TABLE_CKSOFF_a1 = profile->ulGB_VDROOP_TABLE_CKSOFF_a1; + param->ulGB_VDROOP_TABLE_CKSOFF_a2 = profile->ulGB_VDROOP_TABLE_CKSOFF_a2; + param->ulGB_VDROOP_TABLE_CKSON_a0 = profile->ulGB_VDROOP_TABLE_CKSON_a0; + param->ulGB_VDROOP_TABLE_CKSON_a1 = profile->ulGB_VDROOP_TABLE_CKSON_a1; + param->ulGB_VDROOP_TABLE_CKSON_a2 = profile->ulGB_VDROOP_TABLE_CKSON_a2; + param->ulAVFSGB_FUSE_TABLE_CKSOFF_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_m1; + param->usAVFSGB_FUSE_TABLE_CKSOFF_m2 = 
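/*
 * Two small fixes ride along in these hunks: atomctrl_lookup_gpio_pin used
 * to return false on a successful match and true when the pin was absent,
 * inverting the contract its callers assume, and now returns true exactly
 * when the assignment was filled in; and acpi_pcie_perf_request now tells
 * the platform the device is ready via
 * ATCS_FUNCTION_PCIE_DEVICE_READY_NOTIFICATION before issuing the request
 * proper. A caller's view of the lookup after the fix:
 *
 *	found = atomctrl_lookup_gpio_pin(gpio_table, pin_id, &assignment);
 *	if (!found)
 *		return false;	pin_id absent from the BIOS table
 */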
profile->usAVFSGB_FUSE_TABLE_CKSOFF_m2; + param->ulAVFSGB_FUSE_TABLE_CKSOFF_b = profile->ulAVFSGB_FUSE_TABLE_CKSOFF_b; + param->ulAVFSGB_FUSE_TABLE_CKSON_m1 = profile->ulAVFSGB_FUSE_TABLE_CKSON_m1; + param->usAVFSGB_FUSE_TABLE_CKSON_m2 = profile->usAVFSGB_FUSE_TABLE_CKSON_m2; + param->ulAVFSGB_FUSE_TABLE_CKSON_b = profile->ulAVFSGB_FUSE_TABLE_CKSON_b; + param->usMaxVoltage_0_25mv = profile->usMaxVoltage_0_25mv; + param->ucEnableGB_VDROOP_TABLE_CKSOFF = profile->ucEnableGB_VDROOP_TABLE_CKSOFF; + param->ucEnableGB_VDROOP_TABLE_CKSON = profile->ucEnableGB_VDROOP_TABLE_CKSON; + param->ucEnableGB_FUSE_TABLE_CKSOFF = profile->ucEnableGB_FUSE_TABLE_CKSOFF; + param->ucEnableGB_FUSE_TABLE_CKSON = profile->ucEnableGB_FUSE_TABLE_CKSON; + param->usPSM_Age_ComFactor = profile->usPSM_Age_ComFactor; + param->ucEnableApplyAVFS_CKS_OFF_Voltage = profile->ucEnableApplyAVFS_CKS_OFF_Voltage; + + return 0; +} diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h index d24ebb566905..248c5db5f380 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomctrl.h @@ -250,6 +250,35 @@ struct pp_atomctrl_gpio_pin_assignment { }; typedef struct pp_atomctrl_gpio_pin_assignment pp_atomctrl_gpio_pin_assignment; +struct pp_atom_ctrl__avfs_parameters { + uint32_t ulAVFS_meanNsigma_Acontant0; + uint32_t ulAVFS_meanNsigma_Acontant1; + uint32_t ulAVFS_meanNsigma_Acontant2; + uint16_t usAVFS_meanNsigma_DC_tol_sigma; + uint16_t usAVFS_meanNsigma_Platform_mean; + uint16_t usAVFS_meanNsigma_Platform_sigma; + uint32_t ulGB_VDROOP_TABLE_CKSOFF_a0; + uint32_t ulGB_VDROOP_TABLE_CKSOFF_a1; + uint32_t ulGB_VDROOP_TABLE_CKSOFF_a2; + uint32_t ulGB_VDROOP_TABLE_CKSON_a0; + uint32_t ulGB_VDROOP_TABLE_CKSON_a1; + uint32_t ulGB_VDROOP_TABLE_CKSON_a2; + uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_m1; + uint16_t usAVFSGB_FUSE_TABLE_CKSOFF_m2; + uint32_t ulAVFSGB_FUSE_TABLE_CKSOFF_b; + uint32_t ulAVFSGB_FUSE_TABLE_CKSON_m1; + uint16_t usAVFSGB_FUSE_TABLE_CKSON_m2; + uint32_t ulAVFSGB_FUSE_TABLE_CKSON_b; + uint16_t usMaxVoltage_0_25mv; + uint8_t ucEnableGB_VDROOP_TABLE_CKSOFF; + uint8_t ucEnableGB_VDROOP_TABLE_CKSON; + uint8_t ucEnableGB_FUSE_TABLE_CKSOFF; + uint8_t ucEnableGB_FUSE_TABLE_CKSON; + uint16_t usPSM_Age_ComFactor; + uint8_t ucEnableApplyAVFS_CKS_OFF_Voltage; + uint8_t ucReserved; +}; + extern bool atomctrl_get_pp_assign_pin(struct pp_hwmgr *hwmgr, const uint32_t pinId, pp_atomctrl_gpio_pin_assignment *gpio_pin_assignment); extern int atomctrl_get_voltage_evv_on_sclk(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); extern uint32_t atomctrl_get_mpll_reference_clock(struct pp_hwmgr *hwmgr); @@ -278,5 +307,8 @@ extern int atomctrl_set_ac_timing_ai(struct pp_hwmgr *hwmgr, uint32_t memory_clo extern int atomctrl_get_voltage_evv_on_sclk_ai(struct pp_hwmgr *hwmgr, uint8_t voltage_type, uint32_t sclk, uint16_t virtual_voltage_Id, uint16_t *voltage); extern int atomctrl_get_smc_sclk_range_table(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl_sclk_range_table *table); + +extern int atomctrl_get_avfs_information(struct pp_hwmgr *hwmgr, struct pp_atom_ctrl__avfs_parameters *param); + #endif diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c index 2f1a14fe05b1..35bc8a29b773 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/processpptables.c @@ -794,15 +794,18 @@ 
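[Editor's note] For readers tracing the new AVFS plumbing, here is a minimal sketch of a hypothetical caller of atomctrl_get_avfs_information() as introduced above. Per the hunk, it returns -EINVAL for a NULL param, -1 when the VBIOS ASIC_ProfilingInfo table cannot be found, and 0 on success. The helper name example_populate_avfs() and the pr_debug() are illustrative, not part of the patch; the field spelling "Acontant" is as it appears in the source.

static int example_populate_avfs(struct pp_hwmgr *hwmgr)
{
	struct pp_atom_ctrl__avfs_parameters avfs_params = {0};
	int ret;

	ret = atomctrl_get_avfs_information(hwmgr, &avfs_params);
	if (ret)
		return ret;	/* no ASIC_ProfilingInfo table in the VBIOS */

	/* These mean-n-sigma constants would now feed the SMC AVFS tables. */
	pr_debug("AVFS A0 constant: %u\n",
		 avfs_params.ulAVFS_meanNsigma_Acontant0);

	return 0;
}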
static const ATOM_PPLIB_STATE_V2 *get_state_entry_v2( static const ATOM_PPLIB_POWERPLAYTABLE *get_powerplay_table( struct pp_hwmgr *hwmgr) { - const void *table_addr = NULL; + const void *table_addr = hwmgr->soft_pp_table; uint8_t frev, crev; uint16_t size; - table_addr = cgs_atom_get_data_table(hwmgr->device, - GetIndexIntoMasterTable(DATA, PowerPlayInfo), - &size, &frev, &crev); + if (!table_addr) { + table_addr = cgs_atom_get_data_table(hwmgr->device, + GetIndexIntoMasterTable(DATA, PowerPlayInfo), + &size, &frev, &crev); - hwmgr->soft_pp_table = table_addr; + hwmgr->soft_pp_table = table_addr; + hwmgr->soft_pp_table_size = size; + } return (const ATOM_PPLIB_POWERPLAYTABLE *)table_addr; } @@ -1499,7 +1502,7 @@ int get_number_of_vce_state_table_entries( const ATOM_PPLIB_VCE_State_Table *vce_table = get_vce_state_table(hwmgr, table); - if (vce_table > 0) + if (vce_table) return vce_table->numEntries; return 0; @@ -1589,11 +1592,6 @@ static int pp_tables_initialize(struct pp_hwmgr *hwmgr) static int pp_tables_uninitialize(struct pp_hwmgr *hwmgr) { - if (NULL != hwmgr->soft_pp_table) { - kfree(hwmgr->soft_pp_table); - hwmgr->soft_pp_table = NULL; - } - if (NULL != hwmgr->dyn_state.vddc_dependency_on_sclk) { kfree(hwmgr->dyn_state.vddc_dependency_on_sclk); hwmgr->dyn_state.vddc_dependency_on_sclk = NULL; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c index 16fed487973b..6c4553cf8023 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c @@ -2847,27 +2847,6 @@ static int tonga_setup_default_dpm_tables(struct pp_hwmgr *hwmgr) } } - /* Initialize Vddc DPM table based on allow Vddc values. And populate corresponding std values. 
*/ - for (i = 0; i < allowed_vdd_sclk_table->count; i++) { - data->dpm_table.vddc_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddc; - /* tonga_hwmgr->dpm_table.VddcTable.dpm_levels[i].param1 = stdVoltageTable->entries[i].Leakage; */ - /* param1 is for corresponding std voltage */ - data->dpm_table.vddc_table.dpm_levels[i].enabled = 1; - } - data->dpm_table.vddc_table.count = allowed_vdd_sclk_table->count; - - if (NULL != allowed_vdd_mclk_table) { - /* Initialize Vddci DPM table based on allow Mclk values */ - for (i = 0; i < allowed_vdd_mclk_table->count; i++) { - data->dpm_table.vdd_ci_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].vddci; - data->dpm_table.vdd_ci_table.dpm_levels[i].enabled = 1; - data->dpm_table.mvdd_table.dpm_levels[i].value = allowed_vdd_mclk_table->entries[i].mvdd; - data->dpm_table.mvdd_table.dpm_levels[i].enabled = 1; - } - data->dpm_table.vdd_ci_table.count = allowed_vdd_mclk_table->count; - data->dpm_table.mvdd_table.count = allowed_vdd_mclk_table->count; - } - /* setup PCIE gen speed levels*/ tonga_setup_default_pcie_tables(hwmgr); @@ -3047,8 +3026,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) reg_value = 0; if ((0 == reg_value) && - (0 == atomctrl_get_pp_assign_pin(hwmgr, - VDDC_VRHOT_GPIO_PINID, &gpio_pin_assignment))) { + (atomctrl_get_pp_assign_pin(hwmgr, VDDC_VRHOT_GPIO_PINID, + &gpio_pin_assignment))) { table->VRHotGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_RegulatorHot); @@ -3061,8 +3040,8 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) /* ACDC Switch GPIO */ reg_value = 0; if ((0 == reg_value) && - (0 == atomctrl_get_pp_assign_pin(hwmgr, - PP_AC_DC_SWITCH_GPIO_PINID, &gpio_pin_assignment))) { + (atomctrl_get_pp_assign_pin(hwmgr, PP_AC_DC_SWITCH_GPIO_PINID, + &gpio_pin_assignment))) { table->AcDcGpio = gpio_pin_assignment.uc_gpio_pin_bit_shift; phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_AutomaticDCTransition); @@ -3084,8 +3063,7 @@ int tonga_init_smc_table(struct pp_hwmgr *hwmgr) } reg_value = 0; - if ((0 == reg_value) && - (0 == atomctrl_get_pp_assign_pin(hwmgr, + if ((0 == reg_value) && (atomctrl_get_pp_assign_pin(hwmgr, THERMAL_INT_OUTPUT_GPIO_PINID, &gpio_pin_assignment))) { phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_ThermalOutGPIO); @@ -4443,13 +4421,6 @@ int tonga_reset_asic_tasks(struct pp_hwmgr *hwmgr) int tonga_hwmgr_backend_fini(struct pp_hwmgr *hwmgr) { - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (data->soft_pp_table) { - kfree(data->soft_pp_table); - data->soft_pp_table = NULL; - } - return phm_hwmgr_backend_fini(hwmgr); } @@ -4463,7 +4434,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) { int result = 0; SMU72_Discrete_DpmTable *table = NULL; - tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + tonga_hwmgr *data; pp_atomctrl_gpio_pin_assignment gpio_pin_assignment; struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable); phw_tonga_ulv_parm *ulv; @@ -4472,6 +4443,12 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) PP_ASSERT_WITH_CODE((NULL != hwmgr), "Invalid Parameter!", return -1;); + data = kzalloc(sizeof(struct tonga_hwmgr), GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + hwmgr->backend = data; + data->dll_defaule_on = 0; data->sram_end = SMC_RAM_END; @@ -4510,6 +4487,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) data->vdd_ci_control = 
TONGA_VOLTAGE_CONTROL_NONE; data->vdd_gfx_control = TONGA_VOLTAGE_CONTROL_NONE; data->mvdd_control = TONGA_VOLTAGE_CONTROL_NONE; + data->force_pcie_gen = PP_PCIEGenInvalid; if (atomctrl_is_voltage_controled_by_gpio_v3(hwmgr, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) { @@ -4591,7 +4569,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) * if ucGPIO_ID=VDDC_PCC_GPIO_PINID in GPIO_LUTable, * Peak Current Control feature is enabled and we should program PCC HW register */ - if (0 == atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { + if (atomctrl_get_pp_assign_pin(hwmgr, VDDC_PCC_GPIO_PINID, &gpio_pin_assignment)) { uint32_t temp_reg = cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixCNB_PWRMGT_CNTL); @@ -4659,7 +4637,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_GEN_INFO; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_gen_cap = 0x30007; + data->pcie_gen_cap = AMDGPU_DEFAULT_PCIE_GEN_MASK; else data->pcie_gen_cap = (uint32_t)sys_info.value; if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) @@ -4668,7 +4646,7 @@ int tonga_hwmgr_backend_init(struct pp_hwmgr *hwmgr) sys_info.info_id = CGS_SYSTEM_INFO_PCIE_MLW; result = cgs_query_system_info(hwmgr->device, &sys_info); if (result) - data->pcie_lane_cap = 0x2f0000; + data->pcie_lane_cap = AMDGPU_DEFAULT_PCIE_MLW_MASK; else data->pcie_lane_cap = (uint32_t)sys_info.value; } else { @@ -6051,42 +6029,6 @@ static int tonga_get_fan_control_mode(struct pp_hwmgr *hwmgr) CG_FDO_CTRL2, FDO_PWM_MODE); } -static int tonga_get_pp_table(struct pp_hwmgr *hwmgr, char **table) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kmemdup(hwmgr->soft_pp_table, - hwmgr->soft_pp_table_size, - GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - *table = (char *)&data->soft_pp_table; - - return hwmgr->soft_pp_table_size; -} - -static int tonga_set_pp_table(struct pp_hwmgr *hwmgr, const char *buf, size_t size) -{ - struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); - - if (!data->soft_pp_table) { - data->soft_pp_table = kzalloc(hwmgr->soft_pp_table_size, GFP_KERNEL); - if (!data->soft_pp_table) - return -ENOMEM; - } - - memcpy(data->soft_pp_table, buf, size); - - hwmgr->soft_pp_table = data->soft_pp_table; - - /* TODO: re-init powerplay to implement modified pptable */ - - return 0; -} - static int tonga_force_clock_level(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask) { @@ -6194,11 +6136,96 @@ static int tonga_print_clock_levels(struct pp_hwmgr *hwmgr, return size; } +static int tonga_get_sclk_od(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *sclk_table = &(data->dpm_table.sclk_table); + struct tonga_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + int value; + + value = (sclk_table->dpm_levels[sclk_table->count - 1].value - + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) * + 100 / + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return value; +} + +static int tonga_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *golden_sclk_table = + &(data->golden_dpm_table.sclk_table); + struct pp_power_state *ps; + struct tonga_power_state *tonga_ps; + + if (value > 
20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + tonga_ps = cast_phw_tonga_power_state(&ps->hardware); + + tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].engine_clock = + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * + value / 100 + + golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value; + + return 0; +} + +static int tonga_get_mclk_od(struct pp_hwmgr *hwmgr) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *mclk_table = &(data->dpm_table.mclk_table); + struct tonga_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + int value; + + value = (mclk_table->dpm_levels[mclk_table->count - 1].value - + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) * + 100 / + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return value; +} + +static int tonga_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value) +{ + struct tonga_hwmgr *data = (struct tonga_hwmgr *)(hwmgr->backend); + struct tonga_single_dpm_table *golden_mclk_table = + &(data->golden_dpm_table.mclk_table); + struct pp_power_state *ps; + struct tonga_power_state *tonga_ps; + + if (value > 20) + value = 20; + + ps = hwmgr->request_ps; + + if (ps == NULL) + return -EINVAL; + + tonga_ps = cast_phw_tonga_power_state(&ps->hardware); + + tonga_ps->performance_levels[tonga_ps->performance_level_count - 1].memory_clock = + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * + value / 100 + + golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value; + + return 0; +} + static const struct pp_hwmgr_func tonga_hwmgr_funcs = { .backend_init = &tonga_hwmgr_backend_init, .backend_fini = &tonga_hwmgr_backend_fini, .asic_setup = &tonga_setup_asic_task, .dynamic_state_management_enable = &tonga_enable_dpm_tasks, + .dynamic_state_management_disable = &tonga_disable_dpm_tasks, .apply_state_adjust_rules = tonga_apply_state_adjust_rules, .force_dpm_level = &tonga_force_dpm_level, .power_state_set = tonga_set_power_state_tasks, @@ -6232,22 +6259,16 @@ static const struct pp_hwmgr_func tonga_hwmgr_funcs = { .check_states_equal = tonga_check_states_equal, .set_fan_control_mode = tonga_set_fan_control_mode, .get_fan_control_mode = tonga_get_fan_control_mode, - .get_pp_table = tonga_get_pp_table, - .set_pp_table = tonga_set_pp_table, .force_clock_level = tonga_force_clock_level, .print_clock_levels = tonga_print_clock_levels, + .get_sclk_od = tonga_get_sclk_od, + .set_sclk_od = tonga_set_sclk_od, + .get_mclk_od = tonga_get_mclk_od, + .set_mclk_od = tonga_set_mclk_od, }; int tonga_hwmgr_init(struct pp_hwmgr *hwmgr) { - tonga_hwmgr *data; - - data = kzalloc (sizeof(tonga_hwmgr), GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - memset(data, 0x00, sizeof(tonga_hwmgr)); - - hwmgr->backend = data; hwmgr->hwmgr_func = &tonga_hwmgr_funcs; hwmgr->pptable_func = &tonga_pptable_funcs; pp_tonga_thermal_initialize(hwmgr); diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h index 573cd39fe78d..3961884bfa9b 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.h @@ -352,9 +352,6 @@ struct tonga_hwmgr { bool samu_power_gated; /* 1: gated, 0:not gated */ bool acp_power_gated; /* 1: gated, 0:not gated */ bool pg_acp_init; - - /* soft pptable for re-uploading into smu */ - void *soft_pp_table; }; typedef struct tonga_hwmgr 
tonga_hwmgr; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h index 1b44f4e9b8f5..f127198aafc4 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_pptable.h @@ -197,6 +197,22 @@ typedef struct _ATOM_Tonga_SCLK_Dependency_Table { ATOM_Tonga_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ } ATOM_Tonga_SCLK_Dependency_Table; +typedef struct _ATOM_Polaris_SCLK_Dependency_Record { + UCHAR ucVddInd; /* Base voltage */ + USHORT usVddcOffset; /* Offset relative to base voltage */ + ULONG ulSclk; + USHORT usEdcCurrent; + UCHAR ucReliabilityTemperature; + UCHAR ucCKSVOffsetandDisable; /* Bits 0~6: Voltage offset for CKS, Bit 7: Disable/enable for the SCLK level. */ + ULONG ulSclkOffset; +} ATOM_Polaris_SCLK_Dependency_Record; + +typedef struct _ATOM_Polaris_SCLK_Dependency_Table { + UCHAR ucRevId; + UCHAR ucNumEntries; /* Number of entries. */ + ATOM_Polaris_SCLK_Dependency_Record entries[1]; /* Dynamically allocate entries. */ +} ATOM_Polaris_SCLK_Dependency_Table; + typedef struct _ATOM_Tonga_PCIE_Record { UCHAR ucPCIEGenSpeed; UCHAR usPCIELaneWidth; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c index 10e3630ee39d..94d6b472e1fe 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_processpptables.c @@ -408,41 +408,78 @@ static int get_mclk_voltage_dependency_table( static int get_sclk_voltage_dependency_table( struct pp_hwmgr *hwmgr, phm_ppt_v1_clock_voltage_dependency_table **pp_tonga_sclk_dep_table, - const ATOM_Tonga_SCLK_Dependency_Table * sclk_dep_table + const PPTable_Generic_SubTable_Header *sclk_dep_table ) { uint32_t table_size, i; phm_ppt_v1_clock_voltage_dependency_table *sclk_table; - PP_ASSERT_WITH_CODE((0 != sclk_dep_table->ucNumEntries), - "Invalid PowerPlay Table!", return -1); + if (sclk_dep_table->ucRevId < 1) { + const ATOM_Tonga_SCLK_Dependency_Table *tonga_table = + (ATOM_Tonga_SCLK_Dependency_Table *)sclk_dep_table; - table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) - * sclk_dep_table->ucNumEntries; + PP_ASSERT_WITH_CODE((0 != tonga_table->ucNumEntries), + "Invalid PowerPlay Table!", return -1); - sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) - kzalloc(table_size, GFP_KERNEL); + table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) + * tonga_table->ucNumEntries; - if (NULL == sclk_table) - return -ENOMEM; + sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); - memset(sclk_table, 0x00, table_size); - - sclk_table->count = (uint32_t)sclk_dep_table->ucNumEntries; - - for (i = 0; i < sclk_dep_table->ucNumEntries; i++) { - sclk_table->entries[i].vddInd = - sclk_dep_table->entries[i].ucVddInd; - sclk_table->entries[i].vdd_offset = - sclk_dep_table->entries[i].usVddcOffset; - sclk_table->entries[i].clk = - sclk_dep_table->entries[i].ulSclk; - sclk_table->entries[i].cks_enable = - (((sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 
1 : 0; - sclk_table->entries[i].cks_voffset = - (sclk_dep_table->entries[i].ucCKSVOffsetandDisable & 0x7F); - } + if (NULL == sclk_table) + return -ENOMEM; + memset(sclk_table, 0x00, table_size); + + sclk_table->count = (uint32_t)tonga_table->ucNumEntries; + + for (i = 0; i < tonga_table->ucNumEntries; i++) { + sclk_table->entries[i].vddInd = + tonga_table->entries[i].ucVddInd; + sclk_table->entries[i].vdd_offset = + tonga_table->entries[i].usVddcOffset; + sclk_table->entries[i].clk = + tonga_table->entries[i].ulSclk; + sclk_table->entries[i].cks_enable = + (((tonga_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; + sclk_table->entries[i].cks_voffset = + (tonga_table->entries[i].ucCKSVOffsetandDisable & 0x7F); + } + } else { + const ATOM_Polaris_SCLK_Dependency_Table *polaris_table = + (ATOM_Polaris_SCLK_Dependency_Table *)sclk_dep_table; + + PP_ASSERT_WITH_CODE((0 != polaris_table->ucNumEntries), + "Invalid PowerPlay Table!", return -1); + + table_size = sizeof(uint32_t) + sizeof(phm_ppt_v1_clock_voltage_dependency_record) + * polaris_table->ucNumEntries; + + sclk_table = (phm_ppt_v1_clock_voltage_dependency_table *) + kzalloc(table_size, GFP_KERNEL); + + if (NULL == sclk_table) + return -ENOMEM; + + memset(sclk_table, 0x00, table_size); + + sclk_table->count = (uint32_t)polaris_table->ucNumEntries; + + for (i = 0; i < polaris_table->ucNumEntries; i++) { + sclk_table->entries[i].vddInd = + polaris_table->entries[i].ucVddInd; + sclk_table->entries[i].vdd_offset = + polaris_table->entries[i].usVddcOffset; + sclk_table->entries[i].clk = + polaris_table->entries[i].ulSclk; + sclk_table->entries[i].cks_enable = + (((polaris_table->entries[i].ucCKSVOffsetandDisable & 0x80) >> 7) == 0) ? 1 : 0; + sclk_table->entries[i].cks_voffset = + (polaris_table->entries[i].ucCKSVOffsetandDisable & 0x7F); + sclk_table->entries[i].sclk_offset = polaris_table->entries[i].ulSclkOffset; + } + } *pp_tonga_sclk_dep_table = sclk_table; return 0; @@ -708,8 +745,8 @@ static int init_clock_voltage_dependency( const ATOM_Tonga_MCLK_Dependency_Table *mclk_dep_table = (const ATOM_Tonga_MCLK_Dependency_Table *)(((unsigned long) powerplay_table) + le16_to_cpu(powerplay_table->usMclkDependencyTableOffset)); - const ATOM_Tonga_SCLK_Dependency_Table *sclk_dep_table = - (const ATOM_Tonga_SCLK_Dependency_Table *)(((unsigned long) powerplay_table) + + const PPTable_Generic_SubTable_Header *sclk_dep_table = + (const PPTable_Generic_SubTable_Header *)(((unsigned long) powerplay_table) + le16_to_cpu(powerplay_table->usSclkDependencyTableOffset)); const ATOM_Tonga_Hard_Limit_Table *pHardLimits = (const ATOM_Tonga_Hard_Limit_Table *)(((unsigned long) powerplay_table) + @@ -1040,48 +1077,41 @@ int tonga_pp_tables_uninitialize(struct pp_hwmgr *hwmgr) struct phm_ppt_v1_information *pp_table_information = (struct phm_ppt_v1_information *)(hwmgr->pptable); - if (NULL != hwmgr->soft_pp_table) { - kfree(hwmgr->soft_pp_table); - hwmgr->soft_pp_table = NULL; - } - - if (NULL != pp_table_information->vdd_dep_on_sclk) - pp_table_information->vdd_dep_on_sclk = NULL; + kfree(pp_table_information->vdd_dep_on_sclk); + pp_table_information->vdd_dep_on_sclk = NULL; - if (NULL != pp_table_information->vdd_dep_on_mclk) - pp_table_information->vdd_dep_on_mclk = NULL; + kfree(pp_table_information->vdd_dep_on_mclk); + pp_table_information->vdd_dep_on_mclk = NULL; - if (NULL != pp_table_information->valid_mclk_values) - pp_table_information->valid_mclk_values = NULL; + kfree(pp_table_information->valid_mclk_values); + 
pp_table_information->valid_mclk_values = NULL; - if (NULL != pp_table_information->valid_sclk_values) - pp_table_information->valid_sclk_values = NULL; + kfree(pp_table_information->valid_sclk_values); + pp_table_information->valid_sclk_values = NULL; - if (NULL != pp_table_information->vddc_lookup_table) - pp_table_information->vddc_lookup_table = NULL; + kfree(pp_table_information->vddc_lookup_table); + pp_table_information->vddc_lookup_table = NULL; - if (NULL != pp_table_information->vddgfx_lookup_table) - pp_table_information->vddgfx_lookup_table = NULL; + kfree(pp_table_information->vddgfx_lookup_table); + pp_table_information->vddgfx_lookup_table = NULL; - if (NULL != pp_table_information->mm_dep_table) - pp_table_information->mm_dep_table = NULL; + kfree(pp_table_information->mm_dep_table); + pp_table_information->mm_dep_table = NULL; - if (NULL != pp_table_information->cac_dtp_table) - pp_table_information->cac_dtp_table = NULL; + kfree(pp_table_information->cac_dtp_table); + pp_table_information->cac_dtp_table = NULL; - if (NULL != hwmgr->dyn_state.cac_dtp_table) - hwmgr->dyn_state.cac_dtp_table = NULL; + kfree(hwmgr->dyn_state.cac_dtp_table); + hwmgr->dyn_state.cac_dtp_table = NULL; - if (NULL != pp_table_information->ppm_parameter_table) - pp_table_information->ppm_parameter_table = NULL; + kfree(pp_table_information->ppm_parameter_table); + pp_table_information->ppm_parameter_table = NULL; - if (NULL != pp_table_information->pcie_table) - pp_table_information->pcie_table = NULL; + kfree(pp_table_information->pcie_table); + pp_table_information->pcie_table = NULL; - if (NULL != hwmgr->pptable) { - kfree(hwmgr->pptable); - hwmgr->pptable = NULL; - } + kfree(hwmgr->pptable); + hwmgr->pptable = NULL; return result; } diff --git a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h index 50b367d44307..b764c8c05ec8 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h +++ b/drivers/gpu/drm/amd/powerplay/inc/amd_powerplay.h @@ -132,6 +132,7 @@ struct amd_pp_init { uint32_t chip_family; uint32_t chip_id; uint32_t rev_id; + bool powercontainment_enabled; }; enum amd_pp_display_config_type{ AMD_PP_DisplayConfigType_None = 0, @@ -342,6 +343,10 @@ struct amd_powerplay_funcs { int (*set_pp_table)(void *handle, const char *buf, size_t size); int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask); int (*print_clock_levels)(void *handle, enum pp_clock_type type, char *buf); + int (*get_sclk_od)(void *handle); + int (*set_sclk_od)(void *handle, uint32_t value); + int (*get_mclk_od)(void *handle); + int (*set_mclk_od)(void *handle, uint32_t value); }; struct amd_powerplay { @@ -355,6 +360,8 @@ int amd_powerplay_init(struct amd_pp_init *pp_init, int amd_powerplay_fini(void *handle); +int amd_powerplay_reset(void *handle); + int amd_powerplay_display_configuration_change(void *handle, const struct amd_pp_display_configuration *input); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h index 56f712c7d07a..962cb5385951 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hardwaremanager.h @@ -340,6 +340,7 @@ extern int phm_powergate_vce(struct pp_hwmgr *hwmgr, bool gate); extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); extern int phm_setup_asic(struct pp_hwmgr *hwmgr); extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); +extern int 
phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr); extern void phm_init_dynamic_caps(struct pp_hwmgr *hwmgr); extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr); extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block); diff --git a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h index 28f571449495..0bbc42a224e5 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/powerplay/inc/hwmgr.h @@ -278,6 +278,8 @@ struct pp_hwmgr_func { int (*dynamic_state_management_enable)( struct pp_hwmgr *hw_mgr); + int (*dynamic_state_management_disable)( + struct pp_hwmgr *hw_mgr); int (*patch_boot_state)(struct pp_hwmgr *hwmgr, struct pp_hw_power_state *hw_ps); @@ -333,11 +335,13 @@ struct pp_hwmgr_func { int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); int (*power_off_asic)(struct pp_hwmgr *hwmgr); - int (*get_pp_table)(struct pp_hwmgr *hwmgr, char **table); - int (*set_pp_table)(struct pp_hwmgr *hwmgr, const char *buf, size_t size); int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); int (*enable_per_cu_power_gating)(struct pp_hwmgr *hwmgr, bool enable); + int (*get_sclk_od)(struct pp_hwmgr *hwmgr); + int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); + int (*get_mclk_od)(struct pp_hwmgr *hwmgr); + int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); }; struct pp_table_func { @@ -578,6 +582,7 @@ struct pp_hwmgr { struct pp_smumgr *smumgr; const void *soft_pp_table; uint32_t soft_pp_table_size; + void *hardcode_pp_table; bool need_pp_table_upload; enum amd_dpm_forced_level dpm_level; bool block_hw_access; @@ -607,6 +612,7 @@ struct pp_hwmgr { uint32_t num_ps; struct pp_thermal_controller_info thermal_controller; bool fan_ctrl_is_in_default_mode; + bool powercontainment_enabled; uint32_t fan_ctrl_default_mode; uint32_t tmin; struct phm_microcode_version_info microcode_version_info; diff --git a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h index 0c6a413eaa5b..d41d37ab5b7c 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h +++ b/drivers/gpu/drm/amd/powerplay/inc/polaris10_ppsmc.h @@ -27,6 +27,7 @@ #pragma pack(push, 1) +#define PPSMC_MSG_SetGBDroopSettings ((uint16_t) 0x305) #define PPSMC_SWSTATE_FLAG_DC 0x01 #define PPSMC_SWSTATE_FLAG_UVD 0x02 diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h index 3bd5e69b9045..3df5de2cdab0 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h +++ b/drivers/gpu/drm/amd/powerplay/inc/pp_acpi.h @@ -26,3 +26,4 @@ extern bool acpi_atcs_functions_supported(void *device, extern int acpi_pcie_perf_request(void *device, uint8_t perf_req, bool advertise); +extern bool acpi_atcs_notify_pcie_device_ready(void *device); diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74.h b/drivers/gpu/drm/amd/powerplay/inc/smu74.h index 1a12d85b8e97..fd10a9fa843d 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu74.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu74.h @@ -34,6 +34,30 @@ #define SMU__NUM_LCLK_DPM_LEVELS 8 #define SMU__NUM_PCIE_DPM_LEVELS 8 +#define EXP_M1 35 +#define EXP_M2 92821 +#define EXP_B 66629747 + +#define EXP_M1_1 365 +#define EXP_M2_1 658700 +#define EXP_B_1 
305506134 + +#define EXP_M1_2 189 +#define EXP_M2_2 379692 +#define EXP_B_2 194609469 + +#define EXP_M1_3 99 +#define EXP_M2_3 217915 +#define EXP_B_3 122255994 + +#define EXP_M1_4 51 +#define EXP_M2_4 122643 +#define EXP_B_4 74893384 + +#define EXP_M1_5 423 +#define EXP_M2_5 1103326 +#define EXP_B_5 728122621 + enum SID_OPTION { SID_OPTION_HI, SID_OPTION_LO, @@ -548,20 +572,20 @@ struct SMU74_Firmware_Header { uint32_t CacConfigTable; uint32_t CacStatusTable; - uint32_t mcRegisterTable; - uint32_t mcArbDramTimingTable; - - - uint32_t PmFuseTable; uint32_t Globals; uint32_t ClockStretcherTable; uint32_t VftTable; - uint32_t Reserved[21]; + uint32_t Reserved1; + uint32_t AvfsTable; + uint32_t AvfsCksOffGbvTable; + uint32_t AvfsMeanNSigma; + uint32_t AvfsSclkOffsetTable; + uint32_t Reserved[16]; uint32_t Signature; }; @@ -701,8 +725,6 @@ VR Config info is contained in dpmTable.VRConfig */ struct SMU_ClockStretcherDataTableEntry { uint8_t minVID; uint8_t maxVID; - - uint16_t setting; }; typedef struct SMU_ClockStretcherDataTableEntry SMU_ClockStretcherDataTableEntry; @@ -769,6 +791,43 @@ struct VFT_TABLE_t { typedef struct VFT_TABLE_t VFT_TABLE_t; +/* Total margin, root mean square of Fmax + DC + Platform */ +struct AVFS_Margin_t { + VFT_CELL_t Cell[NUM_VFT_COLUMNS]; +}; +typedef struct AVFS_Margin_t AVFS_Margin_t; + +#define BTCGB_VDROOP_TABLE_MAX_ENTRIES 2 +#define AVFSGB_VDROOP_TABLE_MAX_ENTRIES 2 + +struct GB_VDROOP_TABLE_t { + int32_t a0; + int32_t a1; + int32_t a2; + uint32_t spare; +}; +typedef struct GB_VDROOP_TABLE_t GB_VDROOP_TABLE_t; + +struct AVFS_CksOff_Gbv_t { + VFT_CELL_t Cell[NUM_VFT_COLUMNS]; +}; +typedef struct AVFS_CksOff_Gbv_t AVFS_CksOff_Gbv_t; + +struct AVFS_meanNsigma_t { + uint32_t Aconstant[3]; + uint16_t DC_tol_sigma; + uint16_t Platform_mean; + uint16_t Platform_sigma; + uint16_t PSM_Age_CompFactor; + uint8_t Static_Voltage_Offset[NUM_VFT_COLUMNS]; +}; +typedef struct AVFS_meanNsigma_t AVFS_meanNsigma_t; + +struct AVFS_Sclk_Offset_t { + uint16_t Sclk_Offset[8]; +}; +typedef struct AVFS_Sclk_Offset_t AVFS_Sclk_Offset_t; + #endif diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h index 0dfe82336dc7..b85ff5400e57 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h +++ b/drivers/gpu/drm/amd/powerplay/inc/smu74_discrete.h @@ -223,6 +223,16 @@ struct SMU74_Discrete_StateInfo { typedef struct SMU74_Discrete_StateInfo SMU74_Discrete_StateInfo; +struct SMU_QuadraticCoeffs { + int32_t m1; + uint32_t b; + + int16_t m2; + uint8_t m1_shift; + uint8_t m2_shift; +}; +typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs; + struct SMU74_Discrete_DpmTable { SMU74_PIDController GraphicsPIDController; @@ -258,7 +268,14 @@ struct SMU74_Discrete_DpmTable { uint8_t ThermOutPolarity; uint8_t ThermOutMode; uint8_t BootPhases; - uint32_t Reserved[4]; + + uint8_t VRHotLevel; + uint8_t Reserved1[3]; + uint16_t FanStartTemperature; + uint16_t FanStopTemperature; + uint16_t MaxVoltage; + uint16_t Reserved2; + uint32_t Reserved[1]; SMU74_Discrete_GraphicsLevel GraphicsLevel[SMU74_MAX_LEVELS_GRAPHICS]; SMU74_Discrete_MemoryLevel MemoryACPILevel; @@ -347,6 +364,8 @@ struct SMU74_Discrete_DpmTable { uint32_t CurrSclkPllRange; sclkFcwRange_t SclkFcwRangeTable[NUM_SCLK_RANGE]; + GB_VDROOP_TABLE_t BTCGB_VDROOP_TABLE[BTCGB_VDROOP_TABLE_MAX_ENTRIES]; + SMU_QuadraticCoeffs AVFSGB_VDROOP_TABLE[AVFSGB_VDROOP_TABLE_MAX_ENTRIES]; }; typedef struct SMU74_Discrete_DpmTable SMU74_Discrete_DpmTable; @@ -550,16 +569,6 @@ struct 
SMU7_AcpiScoreboard { typedef struct SMU7_AcpiScoreboard SMU7_AcpiScoreboard; -struct SMU_QuadraticCoeffs { - int32_t m1; - uint32_t b; - - int16_t m2; - uint8_t m1_shift; - uint8_t m2_shift; -}; -typedef struct SMU_QuadraticCoeffs SMU_QuadraticCoeffs; - struct SMU74_Discrete_PmFuses { uint8_t BapmVddCVidHiSidd[8]; uint8_t BapmVddCVidLoSidd[8]; @@ -821,6 +830,17 @@ typedef struct SMU7_GfxCuPgScoreboard SMU7_GfxCuPgScoreboard; #define DB_PCC_SHIFT 26 #define DB_EDC_SHIFT 27 +#define BTCGB0_Vdroop_Enable_MASK 0x1 +#define BTCGB1_Vdroop_Enable_MASK 0x2 +#define AVFSGB0_Vdroop_Enable_MASK 0x4 +#define AVFSGB1_Vdroop_Enable_MASK 0x8 + +#define BTCGB0_Vdroop_Enable_SHIFT 0 +#define BTCGB1_Vdroop_Enable_SHIFT 1 +#define AVFSGB0_Vdroop_Enable_SHIFT 2 +#define AVFSGB1_Vdroop_Enable_SHIFT 3 + + #pragma pack(pop) diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c index 673a75c74e18..8e52a2e82db5 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/fiji_smumgr.c @@ -1006,10 +1006,16 @@ static int fiji_smu_init(struct pp_smumgr *smumgr) static int fiji_smu_fini(struct pp_smumgr *smumgr) { + struct fiji_smumgr *priv = (struct fiji_smumgr *)(smumgr->backend); + + smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); + if (smumgr->backend) { kfree(smumgr->backend); smumgr->backend = NULL; } + + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c index de618ead9db8..5dba7c509710 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c @@ -52,19 +52,18 @@ static const SMU74_Discrete_GraphicsLevel avfs_graphics_level_polaris10[8] = { /* Min pcie DeepSleep Activity CgSpll CgSpll CcPwr CcPwr Sclk Enabled Enabled Voltage Power */ /* Voltage, DpmLevel, DivId, Level, FuncCntl3, FuncCntl4, DynRm, DynRm1 Did, Padding,ForActivity, ForThrottle, UpHyst, DownHyst, DownHyst, Throttle */ - { 0x3c0fd047, 0x00, 0x03, 0x1e00, 0x00200410, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x30750000, 0, 0, 0, 0, 0, 0, 0 } }, - { 0xa00fd047, 0x01, 0x04, 0x1e00, 0x00800510, 0x87020000, 0, 0, 0x16, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x409c0000, 0, 0, 0, 0, 0, 0, 0 } }, - { 0x0410d047, 0x01, 0x00, 0x1e00, 0x00600410, 0x87020000, 0, 0, 0x0e, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x50c30000, 0, 0, 0, 0, 0, 0, 0 } }, - { 0x6810d047, 0x01, 0x00, 0x1e00, 0x00800410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x60ea0000, 0, 0, 0, 0, 0, 0, 0 } }, - { 0xcc10d047, 0x01, 0x00, 0x1e00, 0x00e00410, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xe8fd0000, 0, 0, 0, 0, 0, 0, 0 } }, - { 0x3011d047, 0x01, 0x00, 0x1e00, 0x00400510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x70110100, 0, 0, 0, 0, 0, 0, 0 } }, - { 0x9411d047, 0x01, 0x00, 0x1e00, 0x00a00510, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0xf8240100, 0, 0, 0, 0, 0, 0, 0 } }, - { 0xf811d047, 0x01, 0x00, 0x1e00, 0x00000610, 0x87020000, 0, 0, 0x0c, 0, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, { 0x80380100, 0, 0, 0, 0, 0, 0, 0 } } + { 0x100ea446, 0x00, 0x03, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x30750000, 0x3000, 0, 0x2600, 0, 0, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } }, + { 0x400ea446, 0x01, 0x04, 0x3200, 0, 
0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x409c0000, 0x2000, 0, 0x1e00, 1, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } }, + { 0x740ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x50c30000, 0x2800, 0, 0x2000, 1, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } }, + { 0xa40ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x60ea0000, 0x3000, 0, 0x2600, 1, 1, 0x0004, 0x8f02, 0xffff, 0x2f00, 0x300e, 0x2700 } }, + { 0xd80ea446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x70110100, 0x3800, 0, 0x2c00, 1, 1, 0x0004, 0x1203, 0xffff, 0x3600, 0xc9e2, 0x2e00 } }, + { 0x3c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x80380100, 0x2000, 0, 0x1e00, 2, 1, 0x0004, 0x8300, 0xffff, 0x1f00, 0xcb5e, 0x1a00 } }, + { 0x6c0fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0x905f0100, 0x2400, 0, 0x1e00, 2, 1, 0x0004, 0x8901, 0xffff, 0x2300, 0x314c, 0x1d00 } }, + { 0xa00fa446, 0x01, 0x00, 0x3200, 0, 0, 0, 0, 0, 0, 0x01, 0x01, 0x0a, 0x00, 0x00, 0x00, { 0xa0860100, 0x2800, 0, 0x2000, 2, 1, 0x0004, 0x0c02, 0xffff, 0x2700, 0x6433, 0x2100 } } }; static const SMU74_Discrete_MemoryLevel avfs_memory_level_polaris10 = - {0x50140000, 0x50140000, 0x00320000, 0x00, 0x00, - 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x0000, 0x00, 0x00}; + {0x100ea446, 0, 0x30750000, 0x01, 0x01, 0x01, 0x00, 0x00, 0x64, 0x00, 0x00, 0x1f00, 0x00, 0x00}; /** * Set the address for reading/writing the SMC SRAM space. @@ -219,6 +218,18 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr) && (0x20100 <= cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMC_PC_C))); } +static bool polaris10_is_hw_avfs_present(struct pp_smumgr *smumgr) +{ + uint32_t efuse; + + efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC, ixSMU_EFUSE_0 + (49*4)); + efuse &= 0x00000001; + if (efuse) + return true; + + return false; +} + /** * Send a message to the SMC, and wait for its response. 
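[Editor's note] The new polaris10_is_hw_avfs_present() above probes a fuse bit through the indirect SMC register window. A generic sketch of that pattern, assuming only the cgs_read_ind_register() accessor and ixSMU_EFUSE_0 base shown in this patch (the dword offset 49 and bit 0 come from the hunk; the helper name here is illustrative):

static bool example_efuse_bit_set(struct pp_smumgr *smumgr,
				  uint32_t dword, uint32_t bit)
{
	uint32_t efuse;

	/* Fused dwords sit 4 bytes apart, starting at ixSMU_EFUSE_0. */
	efuse = cgs_read_ind_register(smumgr->device, CGS_IND_REG__SMC,
				      ixSMU_EFUSE_0 + (dword * 4));

	return (efuse >> bit) & 0x1;
}

polaris10_smu_init() then latches the probe result into avfs_btc_status (AVFS_BTC_BOOT vs. AVFS_BTC_NOTSUPPORTED), which is what lets tf_polaris10_thermal_avfs_enable() earlier in this series bail out cheaply on parts without AVFS hardware.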
* @@ -228,21 +239,27 @@ bool polaris10_is_smc_ram_running(struct pp_smumgr *smumgr) */ int polaris10_send_msg_to_smc(struct pp_smumgr *smumgr, uint16_t msg) { + int ret; + if (!polaris10_is_smc_ram_running(smumgr)) return -1; + SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) - printk("Failed to send Previous Message.\n"); + ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + if (ret != 1) + printk("\n failed to send pre message %x ret is %d \n", msg, ret); cgs_write_register(smumgr->device, mmSMC_MESSAGE_0, msg); SMUM_WAIT_FIELD_UNEQUAL(smumgr, SMC_RESP_0, SMC_RESP, 0); - if (1 != SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP)) - printk("Failed to send Message.\n"); + ret = SMUM_READ_FIELD(smumgr->device, SMC_RESP_0, SMC_RESP); + + if (ret != 1) + printk("\n failed to send message %x ret is %d \n", msg, ret); return 0; } @@ -469,6 +486,7 @@ int polaris10_smu_fini(struct pp_smumgr *smumgr) kfree(smumgr->backend); smumgr->backend = NULL; } + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); return 0; } @@ -952,6 +970,11 @@ static int polaris10_smu_init(struct pp_smumgr *smumgr) (cgs_handle_t)smu_data->smu_buffer.handle); return -1;); + if (polaris10_is_hw_avfs_present(smumgr)) + smu_data->avfs.avfs_btc_status = AVFS_BTC_BOOT; + else + smu_data->avfs.avfs_btc_status = AVFS_BTC_NOTSUPPORTED; + return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c index c483baf6b4fb..7723473e51a0 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c @@ -23,6 +23,7 @@ #include <linux/types.h> #include <linux/kernel.h> #include <linux/slab.h> +#include <drm/amdgpu_drm.h> #include "pp_instance.h" #include "smumgr.h" #include "cgs_common.h" @@ -52,10 +53,10 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) handle->smu_mgr = smumgr; switch (smumgr->chip_family) { - case AMD_FAMILY_CZ: + case AMDGPU_FAMILY_CZ: cz_smum_init(smumgr); break; - case AMD_FAMILY_VI: + case AMDGPU_FAMILY_VI: switch (smumgr->chip_id) { case CHIP_TONGA: tonga_smum_init(smumgr); @@ -81,6 +82,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle) int smum_fini(struct pp_smumgr *smumgr) { + kfree(smumgr->device); kfree(smumgr); return 0; } diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c index 32820b680d88..b22722eabafc 100644 --- a/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c +++ b/drivers/gpu/drm/amd/powerplay/smumgr/tonga_smumgr.c @@ -328,10 +328,17 @@ int tonga_write_smc_sram_dword(struct pp_smumgr *smumgr, static int tonga_smu_fini(struct pp_smumgr *smumgr) { + struct tonga_smumgr *priv = (struct tonga_smumgr *)(smumgr->backend); + + smu_free_memory(smumgr->device, (void *)priv->smu_buffer.handle); + smu_free_memory(smumgr->device, (void *)priv->header_buffer.handle); + if (smumgr->backend != NULL) { kfree(smumgr->backend); smumgr->backend = NULL; } + + cgs_rel_firmware(smumgr->device, CGS_UCODE_ID_SMU); return 0; } diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h index c89dc777768f..b961a1c6caf3 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h @@ -26,7 +26,7 @@ TRACE_EVENT(amd_sched_job, TP_fast_assign( __entry->entity = sched_job->s_entity; __entry->sched_job = sched_job; - 
__entry->fence = &sched_job->s_fence->base; + __entry->fence = &sched_job->s_fence->finished; __entry->name = sched_job->sched->name; __entry->job_count = kfifo_len( &sched_job->s_entity->job_queue) / sizeof(sched_job); @@ -46,7 +46,7 @@ TRACE_EVENT(amd_sched_process_job, ), TP_fast_assign( - __entry->fence = &fence->base; + __entry->fence = &fence->finished; ), TP_printk("fence=%p signaled", __entry->fence) ); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index c16248cee779..70ff09d10885 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -32,6 +32,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity); static void amd_sched_wakeup(struct amd_gpu_scheduler *sched); +static void amd_sched_process_job(struct fence *f, struct fence_cb *cb); struct kmem_cache *sched_fence_slab; atomic_t sched_fence_slab_ref = ATOMIC_INIT(0); @@ -140,7 +141,7 @@ int amd_sched_entity_init(struct amd_gpu_scheduler *sched, return r; atomic_set(&entity->fence_seq, 0); - entity->fence_context = fence_context_alloc(1); + entity->fence_context = fence_context_alloc(2); return 0; } @@ -251,17 +252,21 @@ static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity) s_fence = to_amd_sched_fence(fence); if (s_fence && s_fence->sched == sched) { - /* Fence is from the same scheduler */ - if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) { - /* Ignore it when it is already scheduled */ - fence_put(entity->dependency); - return false; - } - /* Wait for fence to be scheduled */ - entity->cb.func = amd_sched_entity_clear_dep; - list_add_tail(&entity->cb.node, &s_fence->scheduled_cb); - return true; + /* + * Fence is from the same scheduler, only need to wait for + * it to be scheduled + */ + fence = fence_get(&s_fence->scheduled); + fence_put(entity->dependency); + entity->dependency = fence; + if (!fence_add_callback(fence, &entity->cb, + amd_sched_entity_clear_dep)) + return true; + + /* Ignore it when it is already scheduled */ + fence_put(fence); + return false; } if (!fence_add_callback(entity->dependency, &entity->cb, @@ -319,46 +324,108 @@ static bool amd_sched_entity_in(struct amd_sched_job *sched_job) return added; } -static void amd_sched_free_job(struct fence *f, struct fence_cb *cb) { - struct amd_sched_job *job = container_of(cb, struct amd_sched_job, cb_free_job); - schedule_work(&job->work_free_job); -} - /* job_finish is called after hw fence signaled, and * the job had already been deleted from ring_mirror_list */ -void amd_sched_job_finish(struct amd_sched_job *s_job) +static void amd_sched_job_finish(struct work_struct *work) { - struct amd_sched_job *next; + struct amd_sched_job *s_job = container_of(work, struct amd_sched_job, + finish_work); struct amd_gpu_scheduler *sched = s_job->sched; + /* remove job from ring_mirror_list */ + spin_lock(&sched->job_list_lock); + list_del_init(&s_job->node); if (sched->timeout != MAX_SCHEDULE_TIMEOUT) { - if (cancel_delayed_work(&s_job->work_tdr)) - amd_sched_job_put(s_job); + struct amd_sched_job *next; + + spin_unlock(&sched->job_list_lock); + cancel_delayed_work_sync(&s_job->work_tdr); + spin_lock(&sched->job_list_lock); /* queue TDR for next job */ next = list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node); - if (next) { - INIT_DELAYED_WORK(&next->work_tdr, s_job->timeout_callback); - amd_sched_job_get(next); + if (next) schedule_delayed_work(&next->work_tdr, 
sched->timeout); - } } + spin_unlock(&sched->job_list_lock); + sched->ops->free_job(s_job); } -void amd_sched_job_begin(struct amd_sched_job *s_job) +static void amd_sched_job_finish_cb(struct fence *f, struct fence_cb *cb) +{ + struct amd_sched_job *job = container_of(cb, struct amd_sched_job, + finish_cb); + schedule_work(&job->finish_work); +} + +static void amd_sched_job_begin(struct amd_sched_job *s_job) { struct amd_gpu_scheduler *sched = s_job->sched; + spin_lock(&sched->job_list_lock); + list_add_tail(&s_job->node, &sched->ring_mirror_list); if (sched->timeout != MAX_SCHEDULE_TIMEOUT && - list_first_entry_or_null(&sched->ring_mirror_list, struct amd_sched_job, node) == s_job) - { - INIT_DELAYED_WORK(&s_job->work_tdr, s_job->timeout_callback); - amd_sched_job_get(s_job); + list_first_entry_or_null(&sched->ring_mirror_list, + struct amd_sched_job, node) == s_job) + schedule_delayed_work(&s_job->work_tdr, sched->timeout); + spin_unlock(&sched->job_list_lock); +} + +static void amd_sched_job_timedout(struct work_struct *work) +{ + struct amd_sched_job *job = container_of(work, struct amd_sched_job, + work_tdr.work); + + job->sched->ops->timedout_job(job); +} + +void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched) +{ + struct amd_sched_job *s_job; + + spin_lock(&sched->job_list_lock); + list_for_each_entry_reverse(s_job, &sched->ring_mirror_list, node) { + if (fence_remove_callback(s_job->s_fence->parent, &s_job->s_fence->cb)) { + fence_put(s_job->s_fence->parent); + s_job->s_fence->parent = NULL; + } + } + spin_unlock(&sched->job_list_lock); +} + +void amd_sched_job_recovery(struct amd_gpu_scheduler *sched) +{ + struct amd_sched_job *s_job; + int r; + + spin_lock(&sched->job_list_lock); + s_job = list_first_entry_or_null(&sched->ring_mirror_list, + struct amd_sched_job, node); + if (s_job) schedule_delayed_work(&s_job->work_tdr, sched->timeout); + + list_for_each_entry(s_job, &sched->ring_mirror_list, node) { + struct amd_sched_fence *s_fence = s_job->s_fence; + struct fence *fence = sched->ops->run_job(s_job); + if (fence) { + s_fence->parent = fence_get(fence); + r = fence_add_callback(fence, &s_fence->cb, + amd_sched_process_job); + if (r == -ENOENT) + amd_sched_process_job(fence, &s_fence->cb); + else if (r) + DRM_ERROR("fence add callback failed (%d)\n", + r); + fence_put(fence); + } else { + DRM_ERROR("Failed to run job!\n"); + amd_sched_process_job(NULL, &s_fence->cb); + } } + spin_unlock(&sched->job_list_lock); } /** @@ -372,36 +439,29 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job) { struct amd_sched_entity *entity = sched_job->s_entity; - sched_job->use_sched = 1; - fence_add_callback(&sched_job->s_fence->base, - &sched_job->cb_free_job, amd_sched_free_job); trace_amd_sched_job(sched_job); + fence_add_callback(&sched_job->s_fence->finished, &sched_job->finish_cb, + amd_sched_job_finish_cb); wait_event(entity->sched->job_scheduled, amd_sched_entity_in(sched_job)); } /* init a sched_job with basic field */ int amd_sched_job_init(struct amd_sched_job *job, - struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - void (*timeout_cb)(struct work_struct *work), - void (*free_cb)(struct kref *refcount), - void *owner, struct fence **fence) + struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity, + void *owner) { - INIT_LIST_HEAD(&job->node); - kref_init(&job->refcount); job->sched = sched; job->s_entity = entity; job->s_fence = amd_sched_fence_create(entity, owner); if (!job->s_fence) return -ENOMEM; - job->s_fence->s_job = job; 
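[Editor's note] With the refcount/callback plumbing removed, driver-side submission against the reworked scheduler API becomes two calls. A minimal sketch, assuming only the new amd_sched_job_init() signature and amd_sched_entity_push_job() shown in this patch; the wrapper name and error handling are illustrative:

static int example_submit(struct amd_sched_job *job,
			  struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  void *owner)
{
	int r;

	/* Allocates the scheduled/finished fence pair and arms TDR state. */
	r = amd_sched_job_init(job, sched, entity, owner);
	if (r)
		return r;

	/* Queues the job; free_job is now driven by the finished fence
	 * via amd_sched_job_finish_cb(), not by an explicit kref. */
	amd_sched_entity_push_job(job);
	return 0;
}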
- job->timeout_callback = timeout_cb; - job->free_callback = free_cb; + INIT_WORK(&job->finish_work, amd_sched_job_finish); + INIT_LIST_HEAD(&job->node); + INIT_DELAYED_WORK(&job->work_tdr, amd_sched_job_timedout); - if (fence) - *fence = &job->s_fence->base; return 0; } @@ -450,23 +510,25 @@ static void amd_sched_process_job(struct fence *f, struct fence_cb *cb) struct amd_sched_fence *s_fence = container_of(cb, struct amd_sched_fence, cb); struct amd_gpu_scheduler *sched = s_fence->sched; - unsigned long flags; atomic_dec(&sched->hw_rq_count); - - /* remove job from ring_mirror_list */ - spin_lock_irqsave(&sched->job_list_lock, flags); - list_del_init(&s_fence->s_job->node); - sched->ops->finish_job(s_fence->s_job); - spin_unlock_irqrestore(&sched->job_list_lock, flags); - - amd_sched_fence_signal(s_fence); + amd_sched_fence_finished(s_fence); trace_amd_sched_process_job(s_fence); - fence_put(&s_fence->base); + fence_put(&s_fence->finished); wake_up_interruptible(&sched->wake_up_worker); } +static bool amd_sched_blocked(struct amd_gpu_scheduler *sched) +{ + if (kthread_should_park()) { + kthread_parkme(); + return true; + } + + return false; +} + static int amd_sched_main(void *param) { struct sched_param sparam = {.sched_priority = 1}; @@ -476,14 +538,15 @@ static int amd_sched_main(void *param) sched_setscheduler(current, SCHED_FIFO, &sparam); while (!kthread_should_stop()) { - struct amd_sched_entity *entity; + struct amd_sched_entity *entity = NULL; struct amd_sched_fence *s_fence; struct amd_sched_job *sched_job; struct fence *fence; wait_event_interruptible(sched->wake_up_worker, - (entity = amd_sched_select_entity(sched)) || - kthread_should_stop()); + (!amd_sched_blocked(sched) && + (entity = amd_sched_select_entity(sched))) || + kthread_should_stop()); if (!entity) continue; @@ -495,16 +558,19 @@ static int amd_sched_main(void *param) s_fence = sched_job->s_fence; atomic_inc(&sched->hw_rq_count); - amd_sched_job_pre_schedule(sched, sched_job); + amd_sched_job_begin(sched_job); + fence = sched->ops->run_job(sched_job); amd_sched_fence_scheduled(s_fence); if (fence) { + s_fence->parent = fence_get(fence); r = fence_add_callback(fence, &s_fence->cb, amd_sched_process_job); if (r == -ENOENT) amd_sched_process_job(fence, &s_fence->cb); else if (r) - DRM_ERROR("fence add callback failed (%d)\n", r); + DRM_ERROR("fence add callback failed (%d)\n", + r); fence_put(fence); } else { DRM_ERROR("Failed to run job!\n"); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h index 070095a9433c..7cbbbfb502ef 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h @@ -27,8 +27,6 @@ #include <linux/kfifo.h> #include <linux/fence.h> -#define AMD_SCHED_FENCE_SCHEDULED_BIT FENCE_FLAG_USER_BITS - struct amd_gpu_scheduler; struct amd_sched_rq; @@ -68,36 +66,34 @@ struct amd_sched_rq { }; struct amd_sched_fence { - struct fence base; + struct fence scheduled; + struct fence finished; struct fence_cb cb; - struct list_head scheduled_cb; + struct fence *parent; struct amd_gpu_scheduler *sched; spinlock_t lock; void *owner; - struct amd_sched_job *s_job; }; struct amd_sched_job { - struct kref refcount; struct amd_gpu_scheduler *sched; struct amd_sched_entity *s_entity; struct amd_sched_fence *s_fence; - bool use_sched; /* true if the job goes to scheduler */ - struct fence_cb cb_free_job; - struct work_struct work_free_job; - struct list_head node; - struct delayed_work work_tdr; - void 
(*timeout_callback) (struct work_struct *work); - void (*free_callback)(struct kref *refcount); + struct fence_cb finish_cb; + struct work_struct finish_work; + struct list_head node; + struct delayed_work work_tdr; }; -extern const struct fence_ops amd_sched_fence_ops; +extern const struct fence_ops amd_sched_fence_ops_scheduled; +extern const struct fence_ops amd_sched_fence_ops_finished; static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) { - struct amd_sched_fence *__f = container_of(f, struct amd_sched_fence, base); + if (f->ops == &amd_sched_fence_ops_scheduled) + return container_of(f, struct amd_sched_fence, scheduled); - if (__f->base.ops == &amd_sched_fence_ops) - return __f; + if (f->ops == &amd_sched_fence_ops_finished) + return container_of(f, struct amd_sched_fence, finished); return NULL; } @@ -109,8 +105,8 @@ static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f) struct amd_sched_backend_ops { struct fence *(*dependency)(struct amd_sched_job *sched_job); struct fence *(*run_job)(struct amd_sched_job *sched_job); - void (*begin_job)(struct amd_sched_job *sched_job); - void (*finish_job)(struct amd_sched_job *sched_job); + void (*timedout_job)(struct amd_sched_job *sched_job); + void (*free_job)(struct amd_sched_job *sched_job); }; enum amd_sched_priority { @@ -152,25 +148,11 @@ void amd_sched_entity_push_job(struct amd_sched_job *sched_job); struct amd_sched_fence *amd_sched_fence_create( struct amd_sched_entity *s_entity, void *owner); void amd_sched_fence_scheduled(struct amd_sched_fence *fence); -void amd_sched_fence_signal(struct amd_sched_fence *fence); +void amd_sched_fence_finished(struct amd_sched_fence *fence); int amd_sched_job_init(struct amd_sched_job *job, - struct amd_gpu_scheduler *sched, - struct amd_sched_entity *entity, - void (*timeout_cb)(struct work_struct *work), - void (*free_cb)(struct kref* refcount), - void *owner, struct fence **fence); -void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched , - struct amd_sched_job *s_job); -void amd_sched_job_finish(struct amd_sched_job *s_job); -void amd_sched_job_begin(struct amd_sched_job *s_job); -static inline void amd_sched_job_get(struct amd_sched_job *job) { - if (job) - kref_get(&job->refcount); -} - -static inline void amd_sched_job_put(struct amd_sched_job *job) { - if (job) - kref_put(&job->refcount, job->free_callback); -} - + struct amd_gpu_scheduler *sched, + struct amd_sched_entity *entity, + void *owner); +void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched); +void amd_sched_job_recovery(struct amd_gpu_scheduler *sched); #endif diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c index 2a732c490375..6b63beaf7574 100644 --- a/drivers/gpu/drm/amd/scheduler/sched_fence.c +++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c @@ -27,7 +27,8 @@ #include <drm/drmP.h> #include "gpu_scheduler.h" -struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity, void *owner) +struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *entity, + void *owner) { struct amd_sched_fence *fence = NULL; unsigned seq; @@ -36,46 +37,37 @@ struct amd_sched_fence *amd_sched_fence_create(struct amd_sched_entity *s_entity if (fence == NULL) return NULL; - INIT_LIST_HEAD(&fence->scheduled_cb); fence->owner = owner; - fence->sched = s_entity->sched; + fence->sched = entity->sched; spin_lock_init(&fence->lock); - seq = atomic_inc_return(&s_entity->fence_seq); - fence_init(&fence->base, 
&amd_sched_fence_ops, &fence->lock, - s_entity->fence_context, seq); + seq = atomic_inc_return(&entity->fence_seq); + fence_init(&fence->scheduled, &amd_sched_fence_ops_scheduled, + &fence->lock, entity->fence_context, seq); + fence_init(&fence->finished, &amd_sched_fence_ops_finished, + &fence->lock, entity->fence_context + 1, seq); return fence; } -void amd_sched_fence_signal(struct amd_sched_fence *fence) +void amd_sched_fence_scheduled(struct amd_sched_fence *fence) { - int ret = fence_signal(&fence->base); + int ret = fence_signal(&fence->scheduled); + if (!ret) - FENCE_TRACE(&fence->base, "signaled from irq context\n"); + FENCE_TRACE(&fence->scheduled, "signaled from irq context\n"); else - FENCE_TRACE(&fence->base, "was already signaled\n"); -} - -void amd_sched_job_pre_schedule(struct amd_gpu_scheduler *sched , - struct amd_sched_job *s_job) -{ - unsigned long flags; - spin_lock_irqsave(&sched->job_list_lock, flags); - list_add_tail(&s_job->node, &sched->ring_mirror_list); - sched->ops->begin_job(s_job); - spin_unlock_irqrestore(&sched->job_list_lock, flags); + FENCE_TRACE(&fence->scheduled, "was already signaled\n"); } -void amd_sched_fence_scheduled(struct amd_sched_fence *s_fence) +void amd_sched_fence_finished(struct amd_sched_fence *fence) { - struct fence_cb *cur, *tmp; + int ret = fence_signal(&fence->finished); - set_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &s_fence->base.flags); - list_for_each_entry_safe(cur, tmp, &s_fence->scheduled_cb, node) { - list_del_init(&cur->node); - cur->func(&s_fence->base, cur); - } + if (!ret) + FENCE_TRACE(&fence->finished, "signaled from irq context\n"); + else + FENCE_TRACE(&fence->finished, "was already signaled\n"); } static const char *amd_sched_fence_get_driver_name(struct fence *fence) @@ -105,6 +97,8 @@ static void amd_sched_fence_free(struct rcu_head *rcu) { struct fence *f = container_of(rcu, struct fence, rcu); struct amd_sched_fence *fence = to_amd_sched_fence(f); + + fence_put(fence->parent); kmem_cache_free(sched_fence_slab, fence); } @@ -116,16 +110,41 @@ static void amd_sched_fence_free(struct rcu_head *rcu) * This function is called when the reference count becomes zero. * It just RCU schedules freeing up the fence. */ -static void amd_sched_fence_release(struct fence *f) +static void amd_sched_fence_release_scheduled(struct fence *f) { - call_rcu(&f->rcu, amd_sched_fence_free); + struct amd_sched_fence *fence = to_amd_sched_fence(f); + + call_rcu(&fence->finished.rcu, amd_sched_fence_free); } -const struct fence_ops amd_sched_fence_ops = { +/** + * amd_sched_fence_release_scheduled - drop extra reference + * + * @f: fence + * + * Drop the extra reference from the scheduled fence to the base fence. 
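[Editor's note] The two fence_init() calls above are why amd_sched_entity_init() now allocates two fence contexts (fence_context and fence_context + 1): each job exposes a "scheduled" and a "finished" fence on adjacent contexts sharing one seqno. A sketch of the resulting reference chain, assuming the two release callbacks in this patch and a caller that holds one reference on each fence; example_observe_lifetime() is illustrative only:

static void example_observe_lifetime(struct amd_sched_fence *fence)
{
	/* A balanced get/put on the "finished" side... */
	fence_get(&fence->finished);
	fence_put(&fence->finished);

	/* The final put on "finished" only drops its reference on
	 * "scheduled" (amd_sched_fence_release_finished); the backing
	 * amd_sched_fence is RCU-freed once "scheduled" hits zero
	 * (amd_sched_fence_release_scheduled). */
	fence_put(&fence->scheduled);
}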
+ */ +static void amd_sched_fence_release_finished(struct fence *f) +{ + struct amd_sched_fence *fence = to_amd_sched_fence(f); + + fence_put(&fence->scheduled); +} + +const struct fence_ops amd_sched_fence_ops_scheduled = { + .get_driver_name = amd_sched_fence_get_driver_name, + .get_timeline_name = amd_sched_fence_get_timeline_name, + .enable_signaling = amd_sched_fence_enable_signaling, + .signaled = NULL, + .wait = fence_default_wait, + .release = amd_sched_fence_release_scheduled, +}; + +const struct fence_ops amd_sched_fence_ops_finished = { .get_driver_name = amd_sched_fence_get_driver_name, .get_timeline_name = amd_sched_fence_get_timeline_name, .enable_signaling = amd_sched_fence_enable_signaling, .signaled = NULL, .wait = fence_default_wait, - .release = amd_sched_fence_release, + .release = amd_sched_fence_release_finished, }; diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c index 34405e4a5d36..2f58e9e2a59c 100644 --- a/drivers/gpu/drm/armada/armada_crtc.c +++ b/drivers/gpu/drm/armada/armada_crtc.c @@ -410,7 +410,7 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat) DRM_ERROR("graphics underflow on crtc %u\n", dcrtc->num); if (stat & VSYNC_IRQ) - drm_handle_vblank(dcrtc->crtc.dev, dcrtc->num); + drm_crtc_handle_vblank(&dcrtc->crtc); spin_lock(&dcrtc->irq_lock); ovl_plane = dcrtc->plane; diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c index 59f2f93b6f84..b29a41218fc9 100644 --- a/drivers/gpu/drm/ast/ast_ttm.c +++ b/drivers/gpu/drm/ast/ast_ttm.c @@ -186,17 +186,6 @@ static void ast_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg * { } -static int ast_bo_move(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, - struct ttm_mem_reg *new_mem) -{ - int r; - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); - return r; -} - - static void ast_ttm_backend_destroy(struct ttm_tt *tt) { ttm_tt_fini(tt); @@ -241,7 +230,7 @@ struct ttm_bo_driver ast_bo_driver = { .ttm_tt_unpopulate = ast_ttm_tt_unpopulate, .init_mem_type = ast_bo_init_mem_type, .evict_flags = ast_bo_evict_flags, - .move = ast_bo_move, + .move = NULL, .verify_access = ast_bo_verify_access, .io_mem_reserve = &ast_ttm_io_mem_reserve, .io_mem_free = &ast_ttm_io_mem_free, diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c index 613f6c99b76a..a978381ef95b 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c @@ -383,7 +383,7 @@ static void atmel_hlcdc_crtc_finish_page_flip(struct atmel_hlcdc_crtc *crtc) void atmel_hlcdc_crtc_irq(struct drm_crtc *c) { - drm_handle_vblank(c->dev, 0); + drm_crtc_handle_vblank(c); atmel_hlcdc_crtc_finish_page_flip(drm_crtc_to_atmel_hlcdc_crtc(c)); } diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c index 473a475f27b1..6119b5085501 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c @@ -254,9 +254,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev) if (!ret) ret = atmel_hlcdc_check_endpoint(dev, &ep); - of_node_put(ep_np); - if (ret) + if (ret) { + of_node_put(ep_np); return ret; + } } for_each_endpoint_of_node(dev->dev->of_node, ep_np) { @@ -264,9 +265,10 @@ int atmel_hlcdc_create_outputs(struct drm_device *dev) if (!ret) ret = atmel_hlcdc_attach_endpoint(dev, &ep); - of_node_put(ep_np); - if (ret) + if (ret) { + 
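Context for the ast hunk above (bochs, cirrus and mgag200 receive the same treatment below): a driver .move callback that only calls ttm_bo_move_memcpy() is pure boilerplate, because the TTM core already falls back to the memcpy path when .move is left NULL. Condensed sketch of the core-side dispatch, not the literal TTM source:

	/* inside ttm_bo_handle_move_mem(), roughly: */
	if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_gpu, new_mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);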
of_node_put(ep_np); return ret; + } } return 0; diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c index aef3ca8a81fa..016c191221f3 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c @@ -339,6 +339,8 @@ atmel_hlcdc_plane_update_pos_and_size(struct atmel_hlcdc_plane *plane, atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, factor_reg); + } else { + atmel_hlcdc_layer_update_cfg(&plane->layer, 13, 0xffffffff, 0); } } diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c index 6cf912c45e48..b109fdcaa679 100644 --- a/drivers/gpu/drm/bochs/bochs_mm.c +++ b/drivers/gpu/drm/bochs/bochs_mm.c @@ -165,15 +165,6 @@ static void bochs_ttm_io_mem_free(struct ttm_bo_device *bdev, { } -static int bochs_bo_move(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, - struct ttm_mem_reg *new_mem) -{ - return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); -} - - static void bochs_ttm_backend_destroy(struct ttm_tt *tt) { ttm_tt_fini(tt); @@ -208,7 +199,7 @@ struct ttm_bo_driver bochs_bo_driver = { .ttm_tt_unpopulate = ttm_pool_unpopulate, .init_mem_type = bochs_bo_init_mem_type, .evict_flags = bochs_bo_evict_flags, - .move = bochs_bo_move, + .move = NULL, .verify_access = bochs_bo_verify_access, .io_mem_reserve = &bochs_ttm_io_mem_reserve, .io_mem_free = &bochs_ttm_io_mem_free, diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c index 6768b7b1af32..1cc9ee607128 100644 --- a/drivers/gpu/drm/cirrus/cirrus_ttm.c +++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c @@ -186,17 +186,6 @@ static void cirrus_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_re { } -static int cirrus_bo_move(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, - struct ttm_mem_reg *new_mem) -{ - int r; - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); - return r; -} - - static void cirrus_ttm_backend_destroy(struct ttm_tt *tt) { ttm_tt_fini(tt); @@ -241,7 +230,7 @@ struct ttm_bo_driver cirrus_bo_driver = { .ttm_tt_unpopulate = cirrus_ttm_tt_unpopulate, .init_mem_type = cirrus_bo_init_mem_type, .evict_flags = cirrus_bo_evict_flags, - .move = cirrus_bo_move, + .move = NULL, .verify_access = cirrus_bo_verify_access, .io_mem_reserve = &cirrus_ttm_io_mem_reserve, .io_mem_free = &cirrus_ttm_io_mem_free, diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c index d99ab2f6663f..9359be4a0ca9 100644 --- a/drivers/gpu/drm/drm_atomic.c +++ b/drivers/gpu/drm/drm_atomic.c @@ -1299,14 +1299,39 @@ EXPORT_SYMBOL(drm_atomic_add_affected_planes); */ void drm_atomic_legacy_backoff(struct drm_atomic_state *state) { + struct drm_device *dev = state->dev; + unsigned crtc_mask = 0; + struct drm_crtc *crtc; int ret; + bool global = false; + + drm_for_each_crtc(crtc, dev) { + if (crtc->acquire_ctx != state->acquire_ctx) + continue; + + crtc_mask |= drm_crtc_mask(crtc); + crtc->acquire_ctx = NULL; + } + + if (WARN_ON(dev->mode_config.acquire_ctx == state->acquire_ctx)) { + global = true; + + dev->mode_config.acquire_ctx = NULL; + } retry: drm_modeset_backoff(state->acquire_ctx); - ret = drm_modeset_lock_all_ctx(state->dev, state->acquire_ctx); + ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx); if (ret) goto retry; + + drm_for_each_crtc(crtc, dev) + if (drm_crtc_mask(crtc) & crtc_mask) + crtc->acquire_ctx = state->acquire_ctx; + + if (global) + dev->mode_config.acquire_ctx 
= state->acquire_ctx; } EXPORT_SYMBOL(drm_atomic_legacy_backoff); @@ -1564,6 +1589,72 @@ void drm_atomic_clean_old_fb(struct drm_device *dev, } EXPORT_SYMBOL(drm_atomic_clean_old_fb); +int drm_atomic_remove_fb(struct drm_framebuffer *fb) +{ + struct drm_modeset_acquire_ctx ctx; + struct drm_device *dev = fb->dev; + struct drm_atomic_state *state; + struct drm_plane *plane; + int ret = 0; + unsigned plane_mask; + + state = drm_atomic_state_alloc(dev); + if (!state) + return -ENOMEM; + + drm_modeset_acquire_init(&ctx, 0); + state->acquire_ctx = &ctx; + +retry: + plane_mask = 0; + ret = drm_modeset_lock_all_ctx(dev, &ctx); + if (ret) + goto unlock; + + drm_for_each_plane(plane, dev) { + struct drm_plane_state *plane_state; + + if (plane->state->fb != fb) + continue; + + plane_state = drm_atomic_get_plane_state(state, plane); + if (IS_ERR(plane_state)) { + ret = PTR_ERR(plane_state); + goto unlock; + } + + drm_atomic_set_fb_for_plane(plane_state, NULL); + ret = drm_atomic_set_crtc_for_plane(plane_state, NULL); + if (ret) + goto unlock; + + plane_mask |= BIT(drm_plane_index(plane)); + + plane->old_fb = plane->fb; + plane->fb = NULL; + } + + if (plane_mask) + ret = drm_atomic_commit(state); + +unlock: + if (plane_mask) + drm_atomic_clean_old_fb(dev, plane_mask, ret); + + if (ret == -EDEADLK) { + drm_modeset_backoff(&ctx); + goto retry; + } + + if (ret || !plane_mask) + drm_atomic_state_free(state); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + return ret; +} + int drm_mode_atomic_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c index 059f7c39c582..a7916e5f8864 100644 --- a/drivers/gpu/drm/drm_cache.c +++ b/drivers/gpu/drm/drm_cache.c @@ -136,6 +136,7 @@ drm_clflush_virt_range(void *addr, unsigned long length) mb(); for (; addr < end; addr += size) clflushopt(addr); + clflushopt(end - 1); /* force serialisation */ mb(); return; } diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index fd93e9c79d28..9d3f80efc9cc 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -396,6 +396,51 @@ void drm_mode_object_reference(struct drm_mode_object *obj) } EXPORT_SYMBOL(drm_mode_object_reference); +/** + * drm_crtc_force_disable - Forcibly turn off a CRTC + * @crtc: CRTC to turn off + * + * Returns: + * Zero on success, error code on failure. + */ +int drm_crtc_force_disable(struct drm_crtc *crtc) +{ + struct drm_mode_set set = { + .crtc = crtc, + }; + + return drm_mode_set_config_internal(&set); +} +EXPORT_SYMBOL(drm_crtc_force_disable); + +/** + * drm_crtc_force_disable_all - Forcibly turn off all enabled CRTCs + * @dev: DRM device whose CRTCs to turn off + * + * Drivers may want to call this on unload to ensure that all displays are + * unlit and the GPU is in a consistent, low power state. Takes modeset locks. + * + * Returns: + * Zero on success, error code on failure. 
+ */ +int drm_crtc_force_disable_all(struct drm_device *dev) +{ + struct drm_crtc *crtc; + int ret = 0; + + drm_modeset_lock_all(dev); + drm_for_each_crtc(crtc, dev) + if (crtc->enabled) { + ret = drm_crtc_force_disable(crtc); + if (ret) + goto out; + } +out: + drm_modeset_unlock_all(dev); + return ret; +} +EXPORT_SYMBOL(drm_crtc_force_disable_all); + static void drm_framebuffer_free(struct kref *kref) { struct drm_framebuffer *fb = @@ -544,8 +589,6 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) struct drm_device *dev; struct drm_crtc *crtc; struct drm_plane *plane; - struct drm_mode_set set; - int ret; if (!fb) return; @@ -570,16 +613,17 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) * in this manner. */ if (drm_framebuffer_read_refcount(fb) > 1) { + if (dev->mode_config.funcs->atomic_commit) { + drm_atomic_remove_fb(fb); + goto out; + } + drm_modeset_lock_all(dev); /* remove from any CRTC */ drm_for_each_crtc(crtc, dev) { if (crtc->primary->fb == fb) { /* should turn off the crtc */ - memset(&set, 0, sizeof(struct drm_mode_set)); - set.crtc = crtc; - set.fb = NULL; - ret = drm_mode_set_config_internal(&set); - if (ret) + if (drm_crtc_force_disable(crtc)) DRM_ERROR("failed to reset crtc %p when fb was deleted\n", crtc); } } @@ -591,6 +635,7 @@ void drm_framebuffer_remove(struct drm_framebuffer *fb) drm_modeset_unlock_all(dev); } +out: drm_framebuffer_unreference(fb); } EXPORT_SYMBOL(drm_framebuffer_remove); @@ -1068,23 +1113,7 @@ void drm_connector_unregister(struct drm_connector *connector) } EXPORT_SYMBOL(drm_connector_unregister); -/** - * drm_connector_register_all - register all connectors - * @dev: drm device - * - * This function registers all connectors in sysfs and other places so that - * userspace can start to access them. drm_connector_register_all() is called - * automatically from drm_dev_register() to complete the device registration, - * if they don't call drm_connector_register() on each connector individually. - * - * When a device is unplugged and should be removed from userspace access, - * call drm_connector_unregister_all(), which is the inverse of this - * function. - * - * Returns: - * Zero on success, error code on failure. 
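The new drm_crtc_force_disable_all() gives drivers a one-call replacement for the open-coded disable loops this series removes (see the nouveau dispnv04 and ch7006 hunks further down). A hypothetical unload path using it:

static void example_driver_unload(struct drm_device *dev)
{
	/* make sure every pipe is dark before tearing down KMS state */
	drm_crtc_force_disable_all(dev);

	drm_mode_config_cleanup(dev);
}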
- */ -int drm_connector_register_all(struct drm_device *dev) +static int drm_connector_register_all(struct drm_device *dev) { struct drm_connector *connector; int ret; @@ -1106,7 +1135,6 @@ err: drm_connector_unregister_all(dev); return ret; } -EXPORT_SYMBOL(drm_connector_register_all); /** * drm_connector_unregister_all - unregister connector userspace interfaces diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c index d61591274ff6..604d3ef72ffa 100644 --- a/drivers/gpu/drm/drm_crtc_helper.c +++ b/drivers/gpu/drm/drm_crtc_helper.c @@ -531,11 +531,11 @@ drm_crtc_helper_disable(struct drm_crtc *crtc) int drm_crtc_helper_set_config(struct drm_mode_set *set) { struct drm_device *dev; - struct drm_crtc *new_crtc; - struct drm_encoder *save_encoders, *new_encoder, *encoder; + struct drm_crtc **save_encoder_crtcs, *new_crtc; + struct drm_encoder **save_connector_encoders, *new_encoder, *encoder; bool mode_changed = false; /* if true do a full mode set */ bool fb_changed = false; /* if true and !mode_changed just do a flip */ - struct drm_connector *save_connectors, *connector; + struct drm_connector *connector; int count = 0, ro, fail = 0; const struct drm_crtc_helper_funcs *crtc_funcs; struct drm_mode_set save_set; @@ -577,15 +577,15 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) * Allocate space for the backup of all (non-pointer) encoder and * connector data. */ - save_encoders = kzalloc(dev->mode_config.num_encoder * - sizeof(struct drm_encoder), GFP_KERNEL); - if (!save_encoders) + save_encoder_crtcs = kzalloc(dev->mode_config.num_encoder * + sizeof(struct drm_crtc *), GFP_KERNEL); + if (!save_encoder_crtcs) return -ENOMEM; - save_connectors = kzalloc(dev->mode_config.num_connector * - sizeof(struct drm_connector), GFP_KERNEL); - if (!save_connectors) { - kfree(save_encoders); + save_connector_encoders = kzalloc(dev->mode_config.num_connector * + sizeof(struct drm_encoder *), GFP_KERNEL); + if (!save_connector_encoders) { + kfree(save_encoder_crtcs); return -ENOMEM; } @@ -596,12 +596,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) */ count = 0; drm_for_each_encoder(encoder, dev) { - save_encoders[count++] = *encoder; + save_encoder_crtcs[count++] = encoder->crtc; } count = 0; drm_for_each_connector(connector, dev) { - save_connectors[count++] = *connector; + save_connector_encoders[count++] = connector->encoder; } save_set.crtc = set->crtc; @@ -634,8 +634,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) mode_changed = true; } - /* take a reference on all connectors in set */ + /* take a reference on all unbound connectors in set, reuse the + * already taken reference for bound connectors + */ for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro]->encoder) + continue; drm_connector_reference(set->connectors[ro]); } @@ -757,30 +761,28 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set) } } - /* after fail drop reference on all connectors in save set */ - count = 0; - drm_for_each_connector(connector, dev) { - drm_connector_unreference(&save_connectors[count++]); - } - - kfree(save_connectors); - kfree(save_encoders); + kfree(save_connector_encoders); + kfree(save_encoder_crtcs); return 0; fail: /* Restore all previous data. 
*/ count = 0; drm_for_each_encoder(encoder, dev) { - *encoder = save_encoders[count++]; + encoder->crtc = save_encoder_crtcs[count++]; } count = 0; drm_for_each_connector(connector, dev) { - *connector = save_connectors[count++]; + connector->encoder = save_connector_encoders[count++]; } - /* after fail drop reference on all connectors in set */ + /* after fail drop reference on all unbound connectors in set, let + * bound connectors keep their reference + */ for (ro = 0; ro < set->num_connectors; ro++) { + if (set->connectors[ro]->encoder) + continue; drm_connector_unreference(set->connectors[ro]); } @@ -790,8 +792,8 @@ fail: save_set.y, save_set.fb)) DRM_ERROR("failed to restore config after modeset failure\n"); - kfree(save_connectors); - kfree(save_encoders); + kfree(save_connector_encoders); + kfree(save_encoder_crtcs); return ret; } EXPORT_SYMBOL(drm_crtc_helper_set_config); diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h index 47a500b90fd7..b248e2238a05 100644 --- a/drivers/gpu/drm/drm_crtc_internal.h +++ b/drivers/gpu/drm/drm_crtc_internal.h @@ -125,6 +125,7 @@ int drm_atomic_get_property(struct drm_mode_object *obj, struct drm_property *property, uint64_t *val); int drm_mode_atomic_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int drm_atomic_remove_fb(struct drm_framebuffer *fb); int drm_modeset_register_all(struct drm_device *dev); void drm_modeset_unregister_all(struct drm_device *dev); diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c index 3334baacf43d..734f86a345f6 100644 --- a/drivers/gpu/drm/drm_dp_aux_dev.c +++ b/drivers/gpu/drm/drm_dp_aux_dev.c @@ -355,8 +355,7 @@ int drm_dp_aux_dev_init(void) drm_dp_aux_dev_class = class_create(THIS_MODULE, "drm_dp_aux_dev"); if (IS_ERR(drm_dp_aux_dev_class)) { - res = PTR_ERR(drm_dp_aux_dev_class); - goto out; + return PTR_ERR(drm_dp_aux_dev_class); } drm_dp_aux_dev_class->dev_groups = drm_dp_aux_groups; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index a13edf5de2d6..6537908050d7 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -2927,11 +2927,9 @@ static void drm_dp_destroy_connector_work(struct work_struct *work) drm_dp_port_teardown_pdt(port, port->pdt); if (!port->input && port->vcpi.vcpi > 0) { - if (mgr->mst_state) { - drm_dp_mst_reset_vcpi_slots(mgr, port); - drm_dp_update_payload_part1(mgr); - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); - } + drm_dp_mst_reset_vcpi_slots(mgr, port); + drm_dp_update_payload_part1(mgr); + drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); } kref_put(&port->kref, drm_dp_free_mst_port); diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index aead9ffcbe29..be27ed36f56e 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -362,9 +362,7 @@ EXPORT_SYMBOL(drm_put_dev); void drm_unplug_dev(struct drm_device *dev) { /* for a USB device */ - drm_minor_unregister(dev, DRM_MINOR_LEGACY); - drm_minor_unregister(dev, DRM_MINOR_RENDER); - drm_minor_unregister(dev, DRM_MINOR_CONTROL); + drm_dev_unregister(dev); mutex_lock(&drm_global_mutex); diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c index c0b0c718994a..1fd6eac1400c 100644 --- a/drivers/gpu/drm/drm_fb_cma_helper.c +++ b/drivers/gpu/drm/drm_fb_cma_helper.c @@ -596,3 +596,18 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma) drm_fb_helper_hotplug_event(&fbdev_cma->fb_helper); } 
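Back in drm_crtc_helper_set_config(): the failure path only ever rewrites encoder->crtc and connector->encoder, so snapshotting those two pointers is enough for rollback, and the old whole-struct copies (which duplicated embedded pointers and reference counts) can go. The pattern in isolation, as a sketch:

	/* save: record only the link pointers the function may change */
	count = 0;
	drm_for_each_encoder(encoder, dev)
		save_encoder_crtcs[count++] = encoder->crtc;

	/* ... attempt the modeset ... */

	/* on failure: restoring the links undoes every change made */
	count = 0;
	drm_for_each_encoder(encoder, dev)
		encoder->crtc = save_encoder_crtcs[count++];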
EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event); + +/** + * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend + * @fbdev_cma: The drm_fbdev_cma struct, may be NULL + * @state: desired state, zero to resume, non-zero to suspend + * + * Calls drm_fb_helper_set_suspend, which is a wrapper around + * fb_set_suspend implemented by fbdev core. + */ +void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, int state) +{ + if (fbdev_cma) + drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state); +} +EXPORT_SYMBOL(drm_fbdev_cma_set_suspend); diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 1f84ff5f1bf8..33af4a5ddca1 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -648,7 +648,7 @@ long drm_ioctl(struct file *filp, int retcode = -EINVAL; char stack_kdata[128]; char *kdata = NULL; - unsigned int usize, asize, drv_size; + unsigned int in_size, out_size, drv_size, ksize; bool is_driver_ioctl; dev = file_priv->minor->dev; @@ -671,9 +671,12 @@ long drm_ioctl(struct file *filp, } drv_size = _IOC_SIZE(ioctl->cmd); - usize = _IOC_SIZE(cmd); - asize = max(usize, drv_size); - cmd = ioctl->cmd; + out_size = in_size = _IOC_SIZE(cmd); + if ((cmd & ioctl->cmd & IOC_IN) == 0) + in_size = 0; + if ((cmd & ioctl->cmd & IOC_OUT) == 0) + out_size = 0; + ksize = max(max(in_size, out_size), drv_size); DRM_DEBUG("pid=%d, dev=0x%lx, auth=%d, %s\n", task_pid_nr(current), @@ -693,30 +696,24 @@ long drm_ioctl(struct file *filp, if (unlikely(retcode)) goto err_i1; - if (cmd & (IOC_IN | IOC_OUT)) { - if (asize <= sizeof(stack_kdata)) { - kdata = stack_kdata; - } else { - kdata = kmalloc(asize, GFP_KERNEL); - if (!kdata) { - retcode = -ENOMEM; - goto err_i1; - } + if (ksize <= sizeof(stack_kdata)) { + kdata = stack_kdata; + } else { + kdata = kmalloc(ksize, GFP_KERNEL); + if (!kdata) { + retcode = -ENOMEM; + goto err_i1; } - if (asize > usize) - memset(kdata + usize, 0, asize - usize); } - if (cmd & IOC_IN) { - if (copy_from_user(kdata, (void __user *)arg, - usize) != 0) { - retcode = -EFAULT; - goto err_i1; - } - } else if (cmd & IOC_OUT) { - memset(kdata, 0, usize); + if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) { + retcode = -EFAULT; + goto err_i1; } + if (ksize > in_size) + memset(kdata + in_size, 0, ksize - in_size); + /* Enforce sane locking for kms driver ioctls. Core ioctls are * too messy still. */ if ((drm_core_check_feature(dev, DRIVER_MODESET) && is_driver_ioctl) || @@ -728,11 +725,8 @@ long drm_ioctl(struct file *filp, mutex_unlock(&drm_global_mutex); } - if (cmd & IOC_OUT) { - if (copy_to_user((void __user *)arg, kdata, - usize) != 0) - retcode = -EFAULT; - } + if (copy_to_user((void __user *)arg, kdata, out_size) != 0) + retcode = -EFAULT; err_i1: if (!ioctl) @@ -759,7 +753,7 @@ EXPORT_SYMBOL(drm_ioctl); * shouldn't be used by any drivers. * * Returns: - * True if the @nr corresponds to a DRM core ioctl numer, false otherwise. + * True if the @nr corresponds to a DRM core ioctl number, false otherwise. */ bool drm_ioctl_flags(unsigned int nr, unsigned int *flags) { diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 8ca3d2bf2bda..35c86acede38 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -532,7 +532,7 @@ int drm_irq_uninstall(struct drm_device *dev) /* * Wake up any waiters so they don't hang. 
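Worked example for the drm_ioctl() rework above, with hypothetical sizes: userspace built against an older header issues a command whose argument struct is 16 bytes, while the driver's current table entry declares 24 bytes.

	/* hypothetical sizes for illustration */
	in_size = out_size = 16;  /* _IOC_SIZE(cmd), IOC_IN and IOC_OUT set */
	drv_size = 24;            /* _IOC_SIZE(ioctl->cmd) */
	ksize = max(max(in_size, out_size), drv_size);  /* == 24 */

copy_from_user() brings in the caller's 16 bytes, the trailing 8 bytes of kdata are zeroed before the handler runs, and copy_to_user() writes back only the 16 bytes the caller has room for, so old binaries keep working against the grown struct.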
This is just to paper over - * isssues for UMS drivers which aren't in full control of their + * issues for UMS drivers which aren't in full control of their * vblank/irq handling. KMS drivers must ensure that vblanks are all * disabled when uninstalling the irq handler. */ @@ -594,7 +594,7 @@ int drm_control(struct drm_device *dev, void *data, return 0; if (drm_core_check_feature(dev, DRIVER_MODESET)) return 0; - /* UMS was only ever support on pci devices. */ + /* UMS was only ever supported on pci devices. */ if (WARN_ON(!dev->pdev)) return -EINVAL; @@ -945,8 +945,8 @@ EXPORT_SYMBOL(drm_crtc_vblank_count); * * This is the legacy version of drm_crtc_vblank_count_and_time(). */ -u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, - struct timeval *vblanktime) +static u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, + struct timeval *vblanktime) { struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; u32 vblank_count; @@ -963,7 +963,6 @@ u32 drm_vblank_count_and_time(struct drm_device *dev, unsigned int pipe, return vblank_count; } -EXPORT_SYMBOL(drm_vblank_count_and_time); /** * drm_crtc_vblank_count_and_time - retrieve "cooked" vblank counter value @@ -975,8 +974,6 @@ EXPORT_SYMBOL(drm_vblank_count_and_time); * vblank events since the system was booted, including lost events due to * modesetting activity. Returns corresponding system timestamp of the time * of the vblank interval that corresponds to the current vblank counter value. - * - * This is the native KMS version of drm_vblank_count_and_time(). */ u32 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc, struct timeval *vblanktime) @@ -1588,12 +1585,6 @@ static int drm_queue_vblank_event(struct drm_device *dev, unsigned int pipe, seq = drm_vblank_count_and_time(dev, pipe, &now); - if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) && - (seq - vblwait->request.sequence) <= (1 << 23)) { - vblwait->request.sequence = seq + 1; - vblwait->reply.sequence = vblwait->request.sequence; - } - DRM_DEBUG("event on vblank count %d, current %d, crtc %u\n", vblwait->request.sequence, seq, pipe); @@ -1690,6 +1681,11 @@ int drm_wait_vblank(struct drm_device *dev, void *data, goto done; } + if ((flags & _DRM_VBLANK_NEXTONMISS) && + (seq - vblwait->request.sequence) <= (1 << 23)) { + vblwait->request.sequence = seq + 1; + } + if (flags & _DRM_VBLANK_EVENT) { /* must hold on to the vblank ref until the event fires * drm_vblank_put will be called asynchronously @@ -1697,11 +1693,6 @@ int drm_wait_vblank(struct drm_device *dev, void *data, return drm_queue_vblank_event(dev, pipe, vblwait, file_priv); } - if ((flags & _DRM_VBLANK_NEXTONMISS) && - (seq - vblwait->request.sequence) <= (1<<23)) { - vblwait->request.sequence = seq + 1; - } - DRM_DEBUG("waiting on vblank count %d, crtc %u\n", vblwait->request.sequence, pipe); vblank->last_wait = vblwait->request.sequence; diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c index 87a8cb73366f..fc0ebd273ef8 100644 --- a/drivers/gpu/drm/drm_memory.c +++ b/drivers/gpu/drm/drm_memory.c @@ -44,7 +44,7 @@ # include <asm/agp.h> #else # ifdef __powerpc__ -# define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) +# define PAGE_AGP pgprot_noncached_wc(PAGE_KERNEL) # else # define PAGE_AGP PAGE_KERNEL # endif diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c index 49311fc61d5d..af0d471ee246 100644 --- a/drivers/gpu/drm/drm_mipi_dsi.c +++ b/drivers/gpu/drm/drm_mipi_dsi.c @@ -999,17 +999,17 @@ int 
mipi_dsi_dcs_set_tear_on(struct mipi_dsi_device *dsi, EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_on); /** - * mipi_dsi_set_tear_scanline() - turn on the display module's Tearing Effect - * output signal on the TE signal line when display module reaches line N - * defined by STS[n:0]. + * mipi_dsi_dcs_set_tear_scanline() - set the scanline to use as trigger for + * the Tearing Effect output signal of the display module * @dsi: DSI peripheral device - * @param: STS[10:0] + * @scanline: scanline to use as trigger + * * Return: 0 on success or a negative error code on failure */ -int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param) +int mipi_dsi_dcs_set_tear_scanline(struct mipi_dsi_device *dsi, u16 scanline) { - u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, param >> 8, - param & 0xff }; + u8 payload[3] = { MIPI_DCS_SET_TEAR_SCANLINE, scanline >> 8, + scanline & 0xff }; ssize_t err; err = mipi_dsi_generic_write(dsi, payload, sizeof(payload)); @@ -1018,7 +1018,7 @@ int mipi_dsi_set_tear_scanline(struct mipi_dsi_device *dsi, u16 param) return 0; } -EXPORT_SYMBOL(mipi_dsi_set_tear_scanline); +EXPORT_SYMBOL(mipi_dsi_dcs_set_tear_scanline); /** * mipi_dsi_dcs_set_pixel_format() - sets the pixel format for the RGB image diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c index 4f0f3b36d537..bf70431073f6 100644 --- a/drivers/gpu/drm/drm_scatter.c +++ b/drivers/gpu/drm/drm_scatter.c @@ -41,7 +41,7 @@ static inline void *drm_vmalloc_dma(unsigned long size) { #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) - return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL | _PAGE_NO_CACHE); + return __vmalloc(size, GFP_KERNEL, pgprot_noncached_wc(PAGE_KERNEL)); #else return vmalloc_32(size); #endif diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c index 43ff44a2b8e7..caa4e4ca616d 100644 --- a/drivers/gpu/drm/drm_vm.c +++ b/drivers/gpu/drm/drm_vm.c @@ -80,7 +80,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma) pgprot_t tmp = vm_get_page_prot(vma->vm_flags); #if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE) - tmp |= _PAGE_NO_CACHE; + tmp = pgprot_noncached_wc(tmp); #endif return tmp; } @@ -593,7 +593,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma) * pages and mappings in fault() */ #if defined(__powerpc__) - pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); #endif vma->vm_ops = &drm_vm_ops; break; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c index 340d390306d8..ffd1b32caa8d 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c @@ -91,10 +91,8 @@ static void load_gpu(struct drm_device *dev) int ret; ret = etnaviv_gpu_init(g); - if (ret) { - dev_err(g->dev, "hw init failed: %d\n", ret); + if (ret) priv->gpu[i] = NULL; - } } } } diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index ff6aa5dfb2d7..87ef34150d46 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -487,6 +487,47 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu) return 0; } +static void etnaviv_gpu_enable_mlcg(struct etnaviv_gpu *gpu) +{ + u32 pmc, ppc; + + /* enable clock gating */ + ppc = gpu_read(gpu, VIVS_PM_POWER_CONTROLS); + ppc |= VIVS_PM_POWER_CONTROLS_ENABLE_MODULE_CLOCK_GATING; + + /* Disable stall module clock gating for 4.3.0.1 and 4.3.0.2 revs */ + if (gpu->identity.revision == 
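Returning to the renamed mipi_dsi_dcs_set_tear_scanline() above: a hypothetical panel driver would pair it with mipi_dsi_dcs_set_tear_on(), e.g. (scanline value illustrative):

static int example_panel_enable_te(struct mipi_dsi_device *dsi)
{
	int ret;

	ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
	if (ret < 0)
		return ret;

	/* fire the TE pulse when the panel reaches scanline 0x300 */
	return mipi_dsi_dcs_set_tear_scanline(dsi, 0x300);
}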
0x4301 || + gpu->identity.revision == 0x4302) + ppc |= VIVS_PM_POWER_CONTROLS_DISABLE_STALL_MODULE_CLOCK_GATING; + + gpu_write(gpu, VIVS_PM_POWER_CONTROLS, ppc); + + pmc = gpu_read(gpu, VIVS_PM_MODULE_CONTROLS); + + /* Disable PA clock gating for GC400+ except for GC420 */ + if (gpu->identity.model >= chipModel_GC400 && + gpu->identity.model != chipModel_GC420) + pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA; + + /* + * Disable PE clock gating on revs < 5.0.0.0 when HZ is + * present without a bug fix. + */ + if (gpu->identity.revision < 0x5000 && + gpu->identity.minor_features0 & chipMinorFeatures0_HZ && + !(gpu->identity.minor_features1 & + chipMinorFeatures1_DISABLE_PE_GATING)) + pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE; + + if (gpu->identity.revision < 0x5422) + pmc |= BIT(15); /* Unknown bit */ + + pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ; + pmc |= VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ; + + gpu_write(gpu, VIVS_PM_MODULE_CONTROLS, pmc); +} + static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) { u16 prefetch; @@ -506,6 +547,9 @@ static void etnaviv_gpu_hw_init(struct etnaviv_gpu *gpu) gpu_write(gpu, VIVS_MC_DEBUG_MEMORY, mc_memory_debug); } + /* enable module-level clock gating */ + etnaviv_gpu_enable_mlcg(gpu); + /* * Update GPU AXI cache atttribute to "cacheable, no allocate". * This is necessary to prevent the iMX6 SoC locking up. @@ -553,8 +597,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) bool mmuv2; ret = pm_runtime_get_sync(gpu->dev); - if (ret < 0) + if (ret < 0) { + dev_err(gpu->dev, "Failed to enable GPU power domain\n"); return ret; + } etnaviv_hw_identify(gpu); @@ -591,8 +637,10 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) } ret = etnaviv_hw_reset(gpu); - if (ret) + if (ret) { + dev_err(gpu->dev, "GPU reset failed\n"); goto fail; + } /* Setup IOMMU.. eventually we will (I think) do this once per context * and have separate page tables per context. 
For now, to keep things @@ -610,12 +658,14 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu) } if (!iommu) { + dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n"); ret = -ENOMEM; goto fail; } gpu->mmu = etnaviv_iommu_new(gpu, iommu, version); if (!gpu->mmu) { + dev_err(gpu->dev, "Failed to instantiate GPU IOMMU\n"); iommu_domain_free(iommu); ret = -ENOMEM; goto fail; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c index 522cfd447892..16353ee81651 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_iommu.c +++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu.c @@ -225,6 +225,7 @@ struct iommu_domain *etnaviv_iommu_domain_alloc(struct etnaviv_gpu *gpu) etnaviv_domain->domain.type = __IOMMU_DOMAIN_PAGING; etnaviv_domain->domain.ops = &etnaviv_iommu_ops.ops; + etnaviv_domain->domain.pgsize_bitmap = SZ_4K; etnaviv_domain->domain.geometry.aperture_start = GPU_MEM_START; etnaviv_domain->domain.geometry.aperture_end = GPU_MEM_START + PT_ENTRIES * SZ_4K - 1; diff --git a/drivers/gpu/drm/etnaviv/state_hi.xml.h b/drivers/gpu/drm/etnaviv/state_hi.xml.h index 6a7de5f1454a..807a3d9e0dd5 100644 --- a/drivers/gpu/drm/etnaviv/state_hi.xml.h +++ b/drivers/gpu/drm/etnaviv/state_hi.xml.h @@ -218,6 +218,13 @@ Copyright (C) 2015 #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_FE 0x00000001 #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_DE 0x00000002 #define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PE 0x00000004 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SH 0x00000008 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_PA 0x00000010 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_SE 0x00000020 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA 0x00000040 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_TX 0x00000080 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_EZ 0x00010000 +#define VIVS_PM_MODULE_CONTROLS_DISABLE_MODULE_CLOCK_GATING_RA_HZ 0x00020000 #define VIVS_PM_MODULE_STATUS 0x00000108 #define VIVS_PM_MODULE_STATUS_MODULE_CLOCK_GATED_FE 0x00000001 diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig index d814b3048ee5..1c7e14cf2781 100644 --- a/drivers/gpu/drm/exynos/Kconfig +++ b/drivers/gpu/drm/exynos/Kconfig @@ -2,10 +2,6 @@ config DRM_EXYNOS tristate "DRM Support for Samsung SoC EXYNOS Series" depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM) select DRM_KMS_HELPER - select DRM_KMS_FB_HELPER - select FB_CFB_FILLRECT - select FB_CFB_COPYAREA - select FB_CFB_IMAGEBLIT select VIDEOMODE_HELPERS help Choose this option if you have a Samsung SoC EXYNOS chipset. 
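On the etnaviv IOMMU hunk above: pgsize_bitmap is a bitmask of the page sizes a domain supports, and the IOMMU core chops every iommu_map() request into chunks matching set bits. With only SZ_4K (BIT(12)) set, mappings proceed 4 KiB at a time. Sketch with illustrative values:

	domain->pgsize_bitmap = SZ_4K;	/* 4 KiB pages only */

	/* the core performs this as sixteen 4 KiB map operations */
	ret = iommu_map(domain, iova, phys, SZ_64K,
			IOMMU_READ | IOMMU_WRITE);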
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index f6223f907c15..7f9901b7777b 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -31,7 +31,6 @@ #include "exynos_drm_plane.h" #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" -#include "exynos_drm_fbdev.h" #include "exynos_drm_iommu.h" /* diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c index 468498e3fec1..4c1fb3f8b5a6 100644 --- a/drivers/gpu/drm/exynos/exynos_dp.c +++ b/drivers/gpu/drm/exynos/exynos_dp.c @@ -34,7 +34,7 @@ struct exynos_dp_device { struct drm_encoder encoder; - struct drm_connector connector; + struct drm_connector *connector; struct drm_bridge *ptn_bridge; struct drm_device *drm_dev; struct device *dev; @@ -70,7 +70,7 @@ static int exynos_dp_poweroff(struct analogix_dp_plat_data *plat_data) static int exynos_dp_get_modes(struct analogix_dp_plat_data *plat_data) { struct exynos_dp_device *dp = to_dp(plat_data); - struct drm_connector *connector = &dp->connector; + struct drm_connector *connector = dp->connector; struct drm_display_mode *mode; int num_modes = 0; @@ -103,6 +103,7 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data, int ret; drm_connector_register(connector); + dp->connector = connector; /* Pre-empt DP connector creation if there's a bridge */ if (dp->ptn_bridge) { diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c index 011211e4167d..edbd98ff293e 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_core.c +++ b/drivers/gpu/drm/exynos/exynos_drm_core.c @@ -15,7 +15,6 @@ #include <drm/drmP.h> #include "exynos_drm_drv.h" #include "exynos_drm_crtc.h" -#include "exynos_drm_fbdev.h" static LIST_HEAD(exynos_drm_subdrv_list); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index 3efe1aa89416..d47216488985 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -30,7 +30,6 @@ #include "exynos_drm_drv.h" #include "exynos_drm_fb.h" -#include "exynos_drm_fbdev.h" #include "exynos_drm_crtc.h" #include "exynos_drm_plane.h" #include "exynos_drm_iommu.h" @@ -120,7 +119,6 @@ static struct fimd_driver_data s3c64xx_fimd_driver_data = { .timing_base = 0x0, .has_clksel = 1, .has_limited_fmt = 1, - .has_hw_trigger = 1, }; static struct fimd_driver_data exynos3_fimd_driver_data = { @@ -171,14 +169,11 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = { .lcdblk_vt_shift = 24, .lcdblk_bypass_shift = 15, .lcdblk_mic_bypass_shift = 11, - .trg_type = I80_HW_TRG, .has_shadowcon = 1, .has_vidoutcon = 1, .has_vtsel = 1, .has_mic_bypass = 1, .has_dp_clk = 1, - .has_hw_trigger = 1, - .has_trigger_per_te = 1, }; struct fimd_context { diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c index 493552368295..8564c3da0d22 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c +++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c @@ -48,13 +48,13 @@ /* registers for base address */ #define G2D_SRC_BASE_ADDR 0x0304 -#define G2D_SRC_STRIDE_REG 0x0308 +#define G2D_SRC_STRIDE 0x0308 #define G2D_SRC_COLOR_MODE 0x030C #define G2D_SRC_LEFT_TOP 0x0310 #define G2D_SRC_RIGHT_BOTTOM 0x0314 #define G2D_SRC_PLANE2_BASE_ADDR 0x0318 #define G2D_DST_BASE_ADDR 0x0404 -#define G2D_DST_STRIDE_REG 0x0408 +#define G2D_DST_STRIDE 0x0408 #define G2D_DST_COLOR_MODE 0x040C #define G2D_DST_LEFT_TOP 0x0410 #define 
G2D_DST_RIGHT_BOTTOM 0x0414 @@ -563,7 +563,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset) switch (reg_offset) { case G2D_SRC_BASE_ADDR: - case G2D_SRC_STRIDE_REG: + case G2D_SRC_STRIDE: case G2D_SRC_COLOR_MODE: case G2D_SRC_LEFT_TOP: case G2D_SRC_RIGHT_BOTTOM: @@ -573,7 +573,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset) reg_type = REG_TYPE_SRC_PLANE2; break; case G2D_DST_BASE_ADDR: - case G2D_DST_STRIDE_REG: + case G2D_DST_STRIDE: case G2D_DST_COLOR_MODE: case G2D_DST_LEFT_TOP: case G2D_DST_RIGHT_BOTTOM: @@ -968,8 +968,8 @@ static int g2d_check_reg_offset(struct device *dev, } else buf_info->types[reg_type] = BUF_TYPE_GEM; break; - case G2D_SRC_STRIDE_REG: - case G2D_DST_STRIDE_REG: + case G2D_SRC_STRIDE: + case G2D_DST_STRIDE: if (for_addr) goto err; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c index 706de3278f1c..3371635cd4d7 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_crtc.c @@ -44,6 +44,8 @@ static void fsl_dcu_drm_disable_crtc(struct drm_crtc *crtc) struct drm_device *dev = crtc->dev; struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + drm_crtc_vblank_off(crtc); + regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, DCU_MODE_DCU_MODE_MASK, DCU_MODE_DCU_MODE(DCU_MODE_OFF)); @@ -61,6 +63,8 @@ static void fsl_dcu_drm_crtc_enable(struct drm_crtc *crtc) DCU_MODE_DCU_MODE(DCU_MODE_NORMAL)); regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, DCU_UPDATE_MODE_READREG); + + drm_crtc_vblank_on(crtc); } static void fsl_dcu_drm_crtc_mode_set_nofb(struct drm_crtc *crtc) @@ -137,9 +141,10 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev) { struct drm_plane *primary; struct drm_crtc *crtc = &fsl_dev->crtc; - unsigned int i, j, reg_num; int ret; + fsl_dcu_drm_init_planes(fsl_dev->drm); + primary = fsl_dcu_drm_primary_create_plane(fsl_dev->drm); if (!primary) return -ENOMEM; @@ -153,19 +158,5 @@ int fsl_dcu_drm_crtc_create(struct fsl_dcu_drm_device *fsl_dev) drm_crtc_helper_add(crtc, &fsl_dcu_drm_crtc_helper_funcs); - if (!strcmp(fsl_dev->soc->name, "ls1021a")) - reg_num = LS1021A_LAYER_REG_NUM; - else - reg_num = VF610_LAYER_REG_NUM; - for (i = 0; i < fsl_dev->soc->total_layer; i++) { - for (j = 1; j <= reg_num; j++) - regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0); - } - regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, - DCU_MODE_DCU_MODE_MASK, - DCU_MODE_DCU_MODE(DCU_MODE_OFF)); - regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, - DCU_UPDATE_MODE_READREG); - return 0; } diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 33727d5d826a..7882387f9bff 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -11,6 +11,7 @@ #include <linux/clk.h> #include <linux/clk-provider.h> +#include <linux/console.h> #include <linux/io.h> #include <linux/mfd/syscon.h> #include <linux/mm.h> @@ -22,6 +23,7 @@ #include <linux/regmap.h> #include <drm/drmP.h> +#include <drm/drm_atomic_helper.h> #include <drm/drm_crtc_helper.h> #include <drm/drm_fb_cma_helper.h> #include <drm/drm_gem_cma_helper.h> @@ -42,7 +44,6 @@ static const struct regmap_config fsl_dcu_regmap_config = { .reg_bits = 32, .reg_stride = 4, .val_bits = 32, - .cache_type = REGCACHE_RBTREE, .volatile_reg = fsl_dcu_drm_is_volatile_reg, }; @@ -228,11 +229,26 @@ static int fsl_dcu_drm_pm_suspend(struct device *dev) if (!fsl_dev) return 0; + disable_irq(fsl_dev->irq); 
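The fsl-dcu CRTC hooks above now bracket the power transitions with drm_crtc_vblank_off()/drm_crtc_vblank_on(), the pairing the DRM core expects so vblank references fail cleanly while the CRTC is down. The canonical shape, as a sketch:

static void example_crtc_disable(struct drm_crtc *crtc)
{
	drm_crtc_vblank_off(crtc);	/* flush pending events, reject new refs */
	/* ... stop scanout, gate clocks ... */
}

static void example_crtc_enable(struct drm_crtc *crtc)
{
	/* ... ungate clocks, start scanout ... */
	drm_crtc_vblank_on(crtc);	/* vblank references work again */
}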
drm_kms_helper_poll_disable(fsl_dev->drm); - regcache_cache_only(fsl_dev->regmap, true); - regcache_mark_dirty(fsl_dev->regmap); - clk_disable(fsl_dev->clk); - clk_unprepare(fsl_dev->clk); + + console_lock(); + drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 1); + console_unlock(); + + fsl_dev->state = drm_atomic_helper_suspend(fsl_dev->drm); + if (IS_ERR(fsl_dev->state)) { + console_lock(); + drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0); + console_unlock(); + + drm_kms_helper_poll_enable(fsl_dev->drm); + enable_irq(fsl_dev->irq); + return PTR_ERR(fsl_dev->state); + } + + clk_disable_unprepare(fsl_dev->pix_clk); + clk_disable_unprepare(fsl_dev->clk); return 0; } @@ -245,21 +261,27 @@ static int fsl_dcu_drm_pm_resume(struct device *dev) if (!fsl_dev) return 0; - ret = clk_enable(fsl_dev->clk); + ret = clk_prepare_enable(fsl_dev->clk); if (ret < 0) { dev_err(dev, "failed to enable dcu clk\n"); - clk_unprepare(fsl_dev->clk); return ret; } - ret = clk_prepare(fsl_dev->clk); + + ret = clk_prepare_enable(fsl_dev->pix_clk); if (ret < 0) { - dev_err(dev, "failed to prepare dcu clk\n"); + dev_err(dev, "failed to enable pix clk\n"); return ret; } + fsl_dcu_drm_init_planes(fsl_dev->drm); + drm_atomic_helper_resume(fsl_dev->drm, fsl_dev->state); + + console_lock(); + drm_fbdev_cma_set_suspend(fsl_dev->fbdev, 0); + console_unlock(); + drm_kms_helper_poll_enable(fsl_dev->drm); - regcache_cache_only(fsl_dev->regmap, false); - regcache_sync(fsl_dev->regmap); + enable_irq(fsl_dev->irq); return 0; } @@ -273,12 +295,14 @@ static const struct fsl_dcu_soc_data fsl_dcu_ls1021a_data = { .name = "ls1021a", .total_layer = 16, .max_layer = 4, + .layer_regs = LS1021A_LAYER_REG_NUM, }; static const struct fsl_dcu_soc_data fsl_dcu_vf610_data = { .name = "vf610", .total_layer = 64, .max_layer = 6, + .layer_regs = VF610_LAYER_REG_NUM, }; static const struct of_device_id fsl_dcu_of_match[] = { diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h index c275f900ff23..3b371fe7491e 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.h @@ -175,6 +175,7 @@ struct fsl_dcu_soc_data { unsigned int total_layer; /*max layer number DCU supported*/ unsigned int max_layer; + unsigned int layer_regs; }; struct fsl_dcu_drm_device { @@ -193,6 +194,7 @@ struct fsl_dcu_drm_device { struct drm_encoder encoder; struct fsl_dcu_drm_connector connector; const struct fsl_dcu_soc_data *soc; + struct drm_atomic_state *state; }; void fsl_dcu_fbdev_init(struct drm_device *dev); diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c index 274558b3b32b..e50467a0deb0 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.c @@ -217,6 +217,22 @@ static const u32 fsl_dcu_drm_plane_formats[] = { DRM_FORMAT_YUV422, }; +void fsl_dcu_drm_init_planes(struct drm_device *dev) +{ + struct fsl_dcu_drm_device *fsl_dev = dev->dev_private; + int i, j; + + for (i = 0; i < fsl_dev->soc->total_layer; i++) { + for (j = 1; j <= fsl_dev->soc->layer_regs; j++) + regmap_write(fsl_dev->regmap, DCU_CTRLDESCLN(i, j), 0); + } + regmap_update_bits(fsl_dev->regmap, DCU_DCU_MODE, + DCU_MODE_DCU_MODE_MASK, + DCU_MODE_DCU_MODE(DCU_MODE_OFF)); + regmap_write(fsl_dev->regmap, DCU_UPDATE_MODE, + DCU_UPDATE_MODE_READREG); +} + struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev) { struct drm_plane *primary; diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h 
b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h index d657f088d859..8ee45f813ee8 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_plane.h @@ -12,6 +12,7 @@ #ifndef __FSL_DCU_DRM_PLANE_H__ #define __FSL_DCU_DRM_PLANE_H__ +void fsl_dcu_drm_init_planes(struct drm_device *dev); struct drm_plane *fsl_dcu_drm_primary_create_plane(struct drm_device *dev); #endif /* __FSL_DCU_DRM_PLANE_H__ */ diff --git a/drivers/gpu/drm/hisilicon/kirin/Kconfig b/drivers/gpu/drm/hisilicon/kirin/Kconfig index ea0df6115f7e..499f64405dac 100644 --- a/drivers/gpu/drm/hisilicon/kirin/Kconfig +++ b/drivers/gpu/drm/hisilicon/kirin/Kconfig @@ -4,6 +4,7 @@ config DRM_HISI_KIRIN select DRM_KMS_HELPER select DRM_GEM_CMA_HELPER select DRM_KMS_CMA_HELPER + select HISI_KIRIN_DW_DSI help Choose this option if you have a hisilicon Kirin chipsets(hi6220). If M is selected the module will be called kirin-drm. diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c index ed76baad525f..c3707d47cd89 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c @@ -487,6 +487,7 @@ static void ade_crtc_enable(struct drm_crtc *crtc) ade_set_medianoc_qos(acrtc); ade_display_enable(acrtc); ade_dump_regs(ctx->base); + drm_crtc_vblank_on(crtc); acrtc->enable = true; } @@ -498,6 +499,7 @@ static void ade_crtc_disable(struct drm_crtc *crtc) if (!acrtc->enable) return; + drm_crtc_vblank_off(crtc); ade_power_down(ctx); acrtc->enable = false; } @@ -965,21 +967,21 @@ static int ade_dts_parse(struct platform_device *pdev, struct ade_hw_ctx *ctx) } ctx->ade_core_clk = devm_clk_get(dev, "clk_ade_core"); - if (!ctx->ade_core_clk) { + if (IS_ERR(ctx->ade_core_clk)) { DRM_ERROR("failed to parse clk ADE_CORE\n"); - return -ENODEV; + return PTR_ERR(ctx->ade_core_clk); } ctx->media_noc_clk = devm_clk_get(dev, "clk_codec_jpeg"); - if (!ctx->media_noc_clk) { + if (IS_ERR(ctx->media_noc_clk)) { DRM_ERROR("failed to parse clk CODEC_JPEG\n"); - return -ENODEV; + return PTR_ERR(ctx->media_noc_clk); } ctx->ade_pix_clk = devm_clk_get(dev, "clk_ade_pix"); - if (!ctx->ade_pix_clk) { + if (IS_ERR(ctx->ade_pix_clk)) { DRM_ERROR("failed to parse clk ADE_PIX\n"); - return -ENODEV; + return PTR_ERR(ctx->ade_pix_clk); } return 0; diff --git a/drivers/gpu/drm/i2c/ch7006_drv.c b/drivers/gpu/drm/i2c/ch7006_drv.c index 0594c45f7164..e9e8ae2ec06b 100644 --- a/drivers/gpu/drm/i2c/ch7006_drv.c +++ b/drivers/gpu/drm/i2c/ch7006_drv.c @@ -361,13 +361,8 @@ static int ch7006_encoder_set_property(struct drm_encoder *encoder, /* Disable the crtc to ensure a full modeset is * performed whenever it's turned on again. 
*/ - if (crtc) { - struct drm_mode_set modeset = { - .crtc = crtc, - }; - - drm_mode_set_config_internal(&modeset); - } + if (crtc) + drm_crtc_force_disable(crtc); } return 0; diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index ba812ef2c9d1..334562d06731 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1535,7 +1535,7 @@ static int mtk_hdmi_dt_parse_pdata(struct mtk_hdmi *hdmi, * HDMI audio codec callbacks */ -static int mtk_hdmi_audio_hw_params(struct device *dev, +static int mtk_hdmi_audio_hw_params(struct device *dev, void *data, struct hdmi_codec_daifmt *daifmt, struct hdmi_codec_params *params) { @@ -1604,7 +1604,7 @@ static int mtk_hdmi_audio_hw_params(struct device *dev, return 0; } -static int mtk_hdmi_audio_startup(struct device *dev) +static int mtk_hdmi_audio_startup(struct device *dev, void *data) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); @@ -1615,7 +1615,7 @@ static int mtk_hdmi_audio_startup(struct device *dev) return 0; } -static void mtk_hdmi_audio_shutdown(struct device *dev) +static void mtk_hdmi_audio_shutdown(struct device *dev, void *data) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); @@ -1624,7 +1624,7 @@ static void mtk_hdmi_audio_shutdown(struct device *dev) mtk_hdmi_audio_disable(hdmi); } -int mtk_hdmi_audio_digital_mute(struct device *dev, bool enable) +int mtk_hdmi_audio_digital_mute(struct device *dev, void *data, bool enable) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); @@ -1638,7 +1638,7 @@ int mtk_hdmi_audio_digital_mute(struct device *dev, bool enable) return 0; } -static int mtk_hdmi_audio_get_eld(struct device *dev, uint8_t *buf, size_t len) +static int mtk_hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) { struct mtk_hdmi *hdmi = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c index cf8f38d39e10..1c366f8cb2d0 100644 --- a/drivers/gpu/drm/mediatek/mtk_mipi_tx.c +++ b/drivers/gpu/drm/mediatek/mtk_mipi_tx.c @@ -431,7 +431,7 @@ static int mtk_mipi_tx_probe(struct platform_device *pdev) phy_set_drvdata(phy, mipi_tx); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - if (IS_ERR(phy)) { + if (IS_ERR(phy_provider)) { ret = PTR_ERR(phy_provider); return ret; } diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c index 9d5083d0f1ee..68268e55d595 100644 --- a/drivers/gpu/drm/mgag200/mgag200_ttm.c +++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c @@ -186,17 +186,6 @@ static void mgag200_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_r { } -static int mgag200_bo_move(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, - struct ttm_mem_reg *new_mem) -{ - int r; - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); - return r; -} - - static void mgag200_ttm_backend_destroy(struct ttm_tt *tt) { ttm_tt_fini(tt); @@ -241,7 +230,7 @@ struct ttm_bo_driver mgag200_bo_driver = { .ttm_tt_unpopulate = mgag200_ttm_tt_unpopulate, .init_mem_type = mgag200_bo_init_mem_type, .evict_flags = mgag200_bo_evict_flags, - .move = mgag200_bo_move, + .move = NULL, .verify_access = mgag200_bo_verify_access, .io_mem_reserve = &mgag200_ttm_io_mem_reserve, .io_mem_free = &mgag200_ttm_io_mem_free, diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index fbe304ee6c80..2aec27dbb5bb 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ 
b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -408,7 +408,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev, } adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo); - if (!adreno_gpu->memptrs) { + if (IS_ERR(adreno_gpu->memptrs)) { dev_err(drm->dev, "could not vmap memptrs\n"); return -ENOMEM; } diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c index 1a061e3e8b9e..a9223bea871b 100644 --- a/drivers/gpu/drm/msm/msm_fbdev.c +++ b/drivers/gpu/drm/msm/msm_fbdev.c @@ -159,6 +159,10 @@ static int msm_fbdev_create(struct drm_fb_helper *helper, dev->mode_config.fb_base = paddr; fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo); + if (IS_ERR(fbi->screen_base)) { + ret = PTR_ERR(fbi->screen_base); + goto fail_unlock; + } fbi->screen_size = fbdev->bo->size; fbi->fix.smem_start = paddr; fbi->fix.smem_len = fbdev->bo->size; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 7daf4054dd2b..69836f5685b1 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -398,6 +398,8 @@ void *msm_gem_vaddr_locked(struct drm_gem_object *obj) return ERR_CAST(pages); msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT, VM_MAP, pgprot_writecombine(PAGE_KERNEL)); + if (msm_obj->vaddr == NULL) + return ERR_PTR(-ENOMEM); } return msm_obj->vaddr; } diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b89ca5174863..eb4bb8b2f3a5 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -40,12 +40,14 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev, submit->dev = dev; submit->gpu = gpu; + submit->fence = NULL; submit->pid = get_pid(task_pid(current)); /* initially, until copy_from_user() and bo lookup succeeds: */ submit->nr_bos = 0; submit->nr_cmds = 0; + INIT_LIST_HEAD(&submit->node); INIT_LIST_HEAD(&submit->bo_list); ww_acquire_init(&submit->ticket, &reservation_ww_class); @@ -75,6 +77,11 @@ static int submit_lookup_objects(struct msm_gem_submit *submit, void __user *userptr = u64_to_user_ptr(args->bos + (i * sizeof(submit_bo))); + /* make sure we don't have garbage flags, in case we hit + * error path before flags is initialized: + */ + submit->bos[i].flags = 0; + ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo)); if (ret) { ret = -EFAULT; diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index b48f73ac6389..0857710c2ff2 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -312,6 +312,9 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit) struct msm_gem_object *obj = submit->bos[idx].obj; const char *buf = msm_gem_vaddr_locked(&obj->base); + if (IS_ERR(buf)) + continue; + buf += iova - submit->bos[idx].iova; rd_write_section(rd, RD_GPUADDR, diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 1f14b908b221..42f5359cf988 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -40,6 +40,10 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) } ring->start = msm_gem_vaddr_locked(ring->bo); + if (IS_ERR(ring->start)) { + ret = PTR_ERR(ring->start); + goto fail; + } ring->end = ring->start + (size / 4); ring->cur = ring->start; diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c index aea81a547e85..34c0f2f67548 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.c +++ 
b/drivers/gpu/drm/nouveau/dispnv04/disp.c @@ -125,18 +125,8 @@ nv04_display_destroy(struct drm_device *dev) struct nv04_display *disp = nv04_display(dev); struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_encoder *encoder; - struct drm_crtc *crtc; struct nouveau_crtc *nv_crtc; - /* Turn every CRTC off. */ - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - struct drm_mode_set modeset = { - .crtc = crtc, - }; - - drm_mode_set_config_internal(&modeset); - } - /* Restore state */ list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.base.head) encoder->enc_restore(&encoder->base.base); diff --git a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c index a665b78b2af5..434d1e29f279 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c +++ b/drivers/gpu/drm/nouveau/dispnv04/tvnv17.c @@ -749,13 +749,8 @@ static int nv17_tv_set_property(struct drm_encoder *encoder, /* Disable the crtc to ensure a full modeset is * performed whenever it's turned on again. */ - if (crtc) { - struct drm_mode_set modeset = { - .crtc = crtc, - }; - - drm_mode_set_config_internal(&modeset); - } + if (crtc) + drm_crtc_force_disable(crtc); } return 0; diff --git a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h index 331620a52afa..287a7d6fa480 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/cl0080.h +++ b/drivers/gpu/drm/nouveau/include/nvif/cl0080.h @@ -29,6 +29,7 @@ struct nv_device_info_v0 { #define NV_DEVICE_INFO_V0_FERMI 0x07 #define NV_DEVICE_INFO_V0_KEPLER 0x08 #define NV_DEVICE_INFO_V0_MAXWELL 0x09 +#define NV_DEVICE_INFO_V0_PASCAL 0x0a __u8 family; __u8 pad06[2]; __u64 ram_size; diff --git a/drivers/gpu/drm/nouveau/include/nvif/class.h b/drivers/gpu/drm/nouveau/include/nvif/class.h index 982aad8fa645..e6e9537537cf 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/class.h +++ b/drivers/gpu/drm/nouveau/include/nvif/class.h @@ -39,6 +39,7 @@ #define KEPLER_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000a06f #define KEPLER_CHANNEL_GPFIFO_B /* cla06f.h */ 0x0000a16f #define MAXWELL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000b06f +#define PASCAL_CHANNEL_GPFIFO_A /* cla06f.h */ 0x0000c06f #define NV50_DISP /* cl5070.h */ 0x00005070 #define G82_DISP /* cl5070.h */ 0x00008270 @@ -50,6 +51,8 @@ #define GK110_DISP /* cl5070.h */ 0x00009270 #define GM107_DISP /* cl5070.h */ 0x00009470 #define GM200_DISP /* cl5070.h */ 0x00009570 +#define GP100_DISP /* cl5070.h */ 0x00009770 +#define GP104_DISP /* cl5070.h */ 0x00009870 #define NV31_MPEG 0x00003174 #define G82_MPEG 0x00008274 @@ -86,6 +89,8 @@ #define GK110_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000927d #define GM107_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000947d #define GM200_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000957d +#define GP100_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000977d +#define GP104_DISP_CORE_CHANNEL_DMA /* cl507d.h */ 0x0000987d #define NV50_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000507e #define G82_DISP_OVERLAY_CHANNEL_DMA /* cl507e.h */ 0x0000827e @@ -105,6 +110,8 @@ #define MAXWELL_A /* cl9097.h */ 0x0000b097 #define MAXWELL_B /* cl9097.h */ 0x0000b197 +#define PASCAL_A /* cl9097.h */ 0x0000c097 + #define NV74_BSP 0x000074b0 #define GT212_MSVLD 0x000085b1 @@ -128,6 +135,8 @@ #define FERMI_DMA 0x000090b5 #define KEPLER_DMA_COPY_A 0x0000a0b5 #define MAXWELL_DMA_COPY_A 0x0000b0b5 +#define PASCAL_DMA_COPY_A 0x0000c0b5 +#define PASCAL_DMA_COPY_B 0x0000c1b5 #define FERMI_DECOMPRESS 0x000090b8 @@ -137,6 +146,7 @@ #define KEPLER_COMPUTE_B 0x0000a1c0 
#define MAXWELL_COMPUTE_A 0x0000b0c0 #define MAXWELL_COMPUTE_B 0x0000b1c0 +#define PASCAL_COMPUTE_A 0x0000c0c0 #define NV74_CIPHER 0x000074c1 #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h index c612dc1f1eb4..7ea8aa7ca408 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/device.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/device.h @@ -16,9 +16,9 @@ enum nvkm_devidx { NVKM_SUBDEV_MC, NVKM_SUBDEV_BUS, NVKM_SUBDEV_TIMER, + NVKM_SUBDEV_INSTMEM, NVKM_SUBDEV_FB, NVKM_SUBDEV_LTC, - NVKM_SUBDEV_INSTMEM, NVKM_SUBDEV_MMU, NVKM_SUBDEV_BAR, NVKM_SUBDEV_PMU, @@ -33,7 +33,10 @@ enum nvkm_devidx { NVKM_ENGINE_CE0, NVKM_ENGINE_CE1, NVKM_ENGINE_CE2, - NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE2, + NVKM_ENGINE_CE3, + NVKM_ENGINE_CE4, + NVKM_ENGINE_CE5, + NVKM_ENGINE_CE_LAST = NVKM_ENGINE_CE5, NVKM_ENGINE_CIPHER, NVKM_ENGINE_DISP, @@ -50,7 +53,8 @@ enum nvkm_devidx { NVKM_ENGINE_NVENC0, NVKM_ENGINE_NVENC1, - NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC1, + NVKM_ENGINE_NVENC2, + NVKM_ENGINE_NVENC_LAST = NVKM_ENGINE_NVENC2, NVKM_ENGINE_NVDEC, NVKM_ENGINE_PM, @@ -102,6 +106,7 @@ struct nvkm_device { NV_C0 = 0xc0, NV_E0 = 0xe0, GM100 = 0x110, + GP100 = 0x130, } card_type; u32 chipset; u8 chiprev; @@ -136,7 +141,7 @@ struct nvkm_device { struct nvkm_volt *volt; struct nvkm_engine *bsp; - struct nvkm_engine *ce[3]; + struct nvkm_engine *ce[6]; struct nvkm_engine *cipher; struct nvkm_disp *disp; struct nvkm_dma *dma; @@ -149,7 +154,7 @@ struct nvkm_device { struct nvkm_engine *mspdec; struct nvkm_engine *msppp; struct nvkm_engine *msvld; - struct nvkm_engine *nvenc[2]; + struct nvkm_engine *nvenc[3]; struct nvkm_engine *nvdec; struct nvkm_pm *pm; struct nvkm_engine *sec; @@ -170,7 +175,6 @@ struct nvkm_device_func { void (*fini)(struct nvkm_device *, bool suspend); resource_size_t (*resource_addr)(struct nvkm_device *, unsigned bar); resource_size_t (*resource_size)(struct nvkm_device *, unsigned bar); - bool cpu_coherent; }; struct nvkm_device_quirk { @@ -206,7 +210,7 @@ struct nvkm_device_chip { int (*volt )(struct nvkm_device *, int idx, struct nvkm_volt **); int (*bsp )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*ce[3] )(struct nvkm_device *, int idx, struct nvkm_engine **); + int (*ce[6] )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*cipher )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*disp )(struct nvkm_device *, int idx, struct nvkm_disp **); int (*dma )(struct nvkm_device *, int idx, struct nvkm_dma **); @@ -219,7 +223,7 @@ struct nvkm_device_chip { int (*mspdec )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*msppp )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*msvld )(struct nvkm_device *, int idx, struct nvkm_engine **); - int (*nvenc[2])(struct nvkm_device *, int idx, struct nvkm_engine **); + int (*nvenc[3])(struct nvkm_device *, int idx, struct nvkm_engine **); int (*nvdec )(struct nvkm_device *, int idx, struct nvkm_engine **); int (*pm )(struct nvkm_device *, int idx, struct nvkm_pm **); int (*sec )(struct nvkm_device *, int idx, struct nvkm_engine **); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h index b5370cb56e3c..e5c9b6268dcc 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/core/tegra.h @@ -28,6 +28,7 @@ struct nvkm_device_tegra { } iommu; int gpu_speedo; + int gpu_speedo_id; }; struct nvkm_device_tegra_func { 
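The device.h hunks above widen nvkm's per-engine bookkeeping in lock-step: enum nvkm_devidx gains NVKM_ENGINE_CE3..CE5 and NVKM_ENGINE_NVENC2, and the nvkm_device / nvkm_device_chip tables grow matching ce[6] and nvenc[3] slots. A minimal stand-alone C sketch of the indexing idiom this enables follows; the 0x80 stride and the 0x104410 status offset are taken from the gp100_ce_intr() hunk later in this patch, while the enum and helper names here are simplified placeholders, not nvkm API.

#include <stdio.h>

/* Consecutive CE enum values let a single subdev index address both the
 * ce[6] pointer array and the per-unit register window. */
enum devidx {
	ENGINE_CE0, ENGINE_CE1, ENGINE_CE2,
	ENGINE_CE3, ENGINE_CE4, ENGINE_CE5,
	ENGINE_CE_LAST = ENGINE_CE5,
};

static unsigned int
ce_intr_status_reg(enum devidx idx)
{
	/* Mirrors gp100_ce_intr(): CE register windows sit 0x80 apart. */
	return 0x104410 + (idx - ENGINE_CE0) * 0x80;
}

int
main(void)
{
	enum devidx i;

	for (i = ENGINE_CE0; i <= ENGINE_CE_LAST; i++)
		printf("ce%d intr status at 0x%06x\n",
		       i - ENGINE_CE0, ce_intr_status_reg(i));
	return 0;
}

The same pattern covers the nvenc[3] expansion: as long as the enum values stay contiguous, per-unit lookup stays simple pointer or offset arithmetic instead of a switch per engine instance.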
diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h index 594d719ba41e..d3d26a1e215d 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/ce.h @@ -7,4 +7,6 @@ int gf100_ce_new(struct nvkm_device *, int, struct nvkm_engine **); int gk104_ce_new(struct nvkm_device *, int, struct nvkm_engine **); int gm107_ce_new(struct nvkm_device *, int, struct nvkm_engine **); int gm200_ce_new(struct nvkm_device *, int, struct nvkm_engine **); +int gp100_ce_new(struct nvkm_device *, int, struct nvkm_engine **); +int gp104_ce_new(struct nvkm_device *, int, struct nvkm_engine **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h index d4fdce27b297..e82049667ce4 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/disp.h @@ -32,4 +32,6 @@ int gk104_disp_new(struct nvkm_device *, int, struct nvkm_disp **); int gk110_disp_new(struct nvkm_device *, int, struct nvkm_disp **); int gm107_disp_new(struct nvkm_device *, int, struct nvkm_disp **); int gm200_disp_new(struct nvkm_device *, int, struct nvkm_disp **); +int gp100_disp_new(struct nvkm_device *, int, struct nvkm_disp **); +int gp104_disp_new(struct nvkm_device *, int, struct nvkm_disp **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h index 15ddfcf5e8db..ed92fec5292c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/fifo.h @@ -66,4 +66,5 @@ int gk20a_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); int gm107_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); int gm200_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); int gm20b_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); +int gp100_fifo_new(struct nvkm_device *, int, struct nvkm_fifo **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h index 6515f5810a26..89cf99307828 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/engine/gr.h @@ -42,4 +42,5 @@ int gk20a_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gm107_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gm200_gr_new(struct nvkm_device *, int, struct nvkm_gr **); int gm20b_gr_new(struct nvkm_device *, int, struct nvkm_gr **); +int gp100_gr_new(struct nvkm_device *, int, struct nvkm_gr **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h index e39a1fea930b..a72f3290528a 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios.h @@ -7,6 +7,9 @@ struct nvkm_bios { u32 size; u8 *data; + u32 image0_size; + u32 imaged_addr; + u32 bmp_offset; u32 bit_offset; @@ -22,10 +25,9 @@ struct nvkm_bios { u8 nvbios_checksum(const u8 *data, int size); u16 nvbios_findstr(const u8 *data, int size, const char *str, int len); int nvbios_memcmp(struct nvkm_bios *, u32 addr, const char *, u32 len); - -#define nvbios_rd08(b,o) (b)->data[(o)] -#define nvbios_rd16(b,o) get_unaligned_le16(&(b)->data[(o)]) -#define nvbios_rd32(b,o) get_unaligned_le32(&(b)->data[(o)]) +u8 nvbios_rd08(struct nvkm_bios *, u32 addr); +u16 nvbios_rd16(struct nvkm_bios *, u32 addr); +u32 
nvbios_rd32(struct nvkm_bios *, u32 addr); int nvkm_bios_new(struct nvkm_device *, int, struct nvkm_bios **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h index db10c11f0595..c5a6ebd5a478 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/bios/disp.h @@ -25,7 +25,8 @@ u16 nvbios_outp_match(struct nvkm_bios *, u16 type, u16 mask, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_outp *); struct nvbios_ocfg { - u16 match; + u8 proto; + u8 flags; u16 clkcmp[2]; }; @@ -33,7 +34,7 @@ u16 nvbios_ocfg_entry(struct nvkm_bios *, u16 outp, u8 idx, u8 *ver, u8 *hdr, u8 *cnt, u8 *len); u16 nvbios_ocfg_parse(struct nvkm_bios *, u16 outp, u8 idx, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); -u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u16 type, +u16 nvbios_ocfg_match(struct nvkm_bios *, u16 outp, u8 proto, u8 flags, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *); u16 nvbios_oclk_match(struct nvkm_bios *, u16 cmp, u32 khz); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h index 0a734fd06acf..3a410275fa71 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/fb.h @@ -56,6 +56,8 @@ struct nvkm_fb { int regions; } tile; + u8 page; + struct nvkm_memory *mmu_rd; struct nvkm_memory *mmu_wr; }; @@ -91,6 +93,8 @@ int gk104_fb_new(struct nvkm_device *, int, struct nvkm_fb **); int gk20a_fb_new(struct nvkm_device *, int, struct nvkm_fb **); int gm107_fb_new(struct nvkm_device *, int, struct nvkm_fb **); int gm200_fb_new(struct nvkm_device *, int, struct nvkm_fb **); +int gp100_fb_new(struct nvkm_device *, int, struct nvkm_fb **); +int gp104_fb_new(struct nvkm_device *, int, struct nvkm_fb **); #include <subdev/bios.h> #include <subdev/bios/ramcfg.h> diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h index c6b90b6543b3..cd755baf9cab 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/ltc.h @@ -38,4 +38,5 @@ int gk104_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); int gk20a_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); int gm107_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); int gm200_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); +int gp100_ltc_new(struct nvkm_device *, int, struct nvkm_ltc **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h index 2e80682b2da1..27d25b18d85c 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mc.h @@ -7,11 +7,14 @@ struct nvkm_mc { struct nvkm_subdev subdev; }; -void nvkm_mc_intr(struct nvkm_mc *, bool *handled); -void nvkm_mc_intr_unarm(struct nvkm_mc *); -void nvkm_mc_intr_rearm(struct nvkm_mc *); -void nvkm_mc_reset(struct nvkm_mc *, enum nvkm_devidx); -void nvkm_mc_unk260(struct nvkm_mc *, u32 data); +void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx); +void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx); +void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx); +void nvkm_mc_intr(struct nvkm_device *, bool *handled); +void nvkm_mc_intr_unarm(struct nvkm_device *); +void nvkm_mc_intr_rearm(struct nvkm_device *); +void nvkm_mc_intr_mask(struct nvkm_device 
*, enum nvkm_devidx, bool enable); +void nvkm_mc_unk260(struct nvkm_device *, u32 data); int nv04_mc_new(struct nvkm_device *, int, struct nvkm_mc **); int nv11_mc_new(struct nvkm_device *, int, struct nvkm_mc **); @@ -24,4 +27,5 @@ int gt215_mc_new(struct nvkm_device *, int, struct nvkm_mc **); int gf100_mc_new(struct nvkm_device *, int, struct nvkm_mc **); int gk104_mc_new(struct nvkm_device *, int, struct nvkm_mc **); int gk20a_mc_new(struct nvkm_device *, int, struct nvkm_mc **); +int gp100_mc_new(struct nvkm_device *, int, struct nvkm_mc **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h index ddb913889d7e..e6523e2cea9f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/pci.h @@ -47,6 +47,7 @@ int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **); int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **); int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **); int gk104_pci_new(struct nvkm_device *, int, struct nvkm_pci **); +int gp100_pci_new(struct nvkm_device *, int, struct nvkm_pci **); /* pcie functions */ int nvkm_pcie_set_link(struct nvkm_pci *, enum nvkm_pcie_speed, u8 width); diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h index c6edd95a5b69..b04c38c07761 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/secboot.h @@ -43,9 +43,8 @@ struct nvkm_secboot { const struct nvkm_secboot_func *func; struct nvkm_subdev subdev; + enum nvkm_devidx devidx; u32 base; - u32 irq_mask; - u32 enable_mask; }; #define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev) diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h index 8fb575a92c48..71ebbfd4484f 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/top.h @@ -8,10 +8,11 @@ struct nvkm_top { struct list_head device; }; -u32 nvkm_top_reset(struct nvkm_top *, enum nvkm_devidx); -u32 nvkm_top_intr(struct nvkm_top *, u32 intr, u64 *subdevs); -enum nvkm_devidx nvkm_top_fault(struct nvkm_top *, int fault); -enum nvkm_devidx nvkm_top_engine(struct nvkm_top *, int, int *runl, int *engn); +u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx); +u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs); +u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx); +enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault); +enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn); int gk104_top_new(struct nvkm_device *, int, struct nvkm_top **); #endif diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h index feff55cff05b..b765f4ffcde6 100644 --- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h +++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/volt.h @@ -12,6 +12,9 @@ struct nvkm_volt { u32 uv; u8 vid; } vid[256]; + + u32 max_uv; + u32 min_uv; }; int nvkm_volt_get(struct nvkm_volt *); diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c index eb7de487a2b3..7bd4683216d0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_abi16.c +++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c @@ -100,6 +100,7 @@ nouveau_abi16_swclass(struct nouveau_drm *drm) case NV_DEVICE_INFO_V0_FERMI: case 
NV_DEVICE_INFO_V0_KEPLER: case NV_DEVICE_INFO_V0_MAXWELL: + case NV_DEVICE_INFO_V0_PASCAL: return NVIF_CLASS_SW_GF100; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 5e3f3e826476..528bdeffb339 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -209,8 +209,7 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, nvbo->tile_flags = tile_flags; nvbo->bo.bdev = &drm->ttm.bdev; - if (!nvxx_device(&drm->device)->func->cpu_coherent) - nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; + nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED; nvbo->page_shift = 12; if (drm->client.vm) { @@ -424,13 +423,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo) if (ret) return ret; - /* - * TTM buffers allocated using the DMA API already have a mapping, let's - * use it instead. - */ - if (!nvbo->force_coherent) - ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, - &nvbo->kmap); + ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap); ttm_bo_unreserve(&nvbo->bo); return ret; @@ -442,12 +435,7 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo) if (!nvbo) return; - /* - * TTM buffers allocated using the DMA API already had a coherent - * mapping which we used, no need to unmap. - */ - if (!nvbo->force_coherent) - ttm_bo_kunmap(&nvbo->kmap); + ttm_bo_kunmap(&nvbo->kmap); } void @@ -506,35 +494,13 @@ nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, return 0; } -static inline void * -_nouveau_bo_mem_index(struct nouveau_bo *nvbo, unsigned index, void *mem, u8 sz) -{ - struct ttm_dma_tt *dma_tt; - u8 *m = mem; - - index *= sz; - - if (m) { - /* kmap'd address, return the corresponding offset */ - m += index; - } else { - /* DMA-API mapping, lookup the right address */ - dma_tt = (struct ttm_dma_tt *)nvbo->bo.ttm; - m = dma_tt->cpu_address[index / PAGE_SIZE]; - m += index % PAGE_SIZE; - } - - return m; -} -#define nouveau_bo_mem_index(o, i, m) _nouveau_bo_mem_index(o, i, m, sizeof(*m)) - void nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val) { bool is_iomem; u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); - mem = nouveau_bo_mem_index(nvbo, index, mem); + mem += index; if (is_iomem) iowrite16_native(val, (void __force __iomem *)mem); @@ -548,7 +514,7 @@ nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index) bool is_iomem; u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); - mem = nouveau_bo_mem_index(nvbo, index, mem); + mem += index; if (is_iomem) return ioread32_native((void __force __iomem *)mem); @@ -562,7 +528,7 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val) bool is_iomem; u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem); - mem = nouveau_bo_mem_index(nvbo, index, mem); + mem += index; if (is_iomem) iowrite32_native(val, (void __force __iomem *)mem); @@ -1082,7 +1048,6 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, ret = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, - no_wait_gpu, new_mem); nouveau_fence_unref(&fence); } @@ -1104,6 +1069,10 @@ nouveau_bo_move_init(struct nouveau_drm *drm) struct ttm_mem_reg *, struct ttm_mem_reg *); int (*init)(struct nouveau_channel *, u32 handle); } _methods[] = { + { "COPY", 4, 0xc1b5, nve0_bo_move_copy, nve0_bo_move_init }, + { "GRCE", 0, 0xc1b5, nve0_bo_move_copy, nvc0_bo_move_init }, + { "COPY", 4, 0xc0b5, nve0_bo_move_copy, nve0_bo_move_init }, + { "GRCE", 0, 0xc0b5, nve0_bo_move_copy, nvc0_bo_move_init }, { "COPY", 4, 0xb0b5, nve0_bo_move_copy, 
nve0_bo_move_init }, { "GRCE", 0, 0xb0b5, nve0_bo_move_copy, nvc0_bo_move_init }, { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, @@ -1289,6 +1258,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, struct nouveau_drm_tile *new_tile = NULL; int ret = 0; + ret = ttm_bo_wait(bo, intr, no_wait_gpu); + if (ret) + return ret; + if (nvbo->pin_refcnt) NV_WARN(drm, "Moving pinned object %p!\n", nvbo); @@ -1324,7 +1297,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, /* Fallback to software copy. */ ret = ttm_bo_wait(bo, intr, no_wait_gpu); if (ret == 0) - ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + ret = ttm_bo_move_memcpy(bo, evict, intr, no_wait_gpu, new_mem); out: if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) { @@ -1488,14 +1461,6 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm) dev = drm->dev; pdev = device->dev; - /* - * Objects matching this condition have been marked as force_coherent, - * so use the DMA API for them. - */ - if (!nvxx_device(&drm->device)->func->cpu_coherent && - ttm->caching_state == tt_uncached) - return ttm_dma_populate(ttm_dma, dev->dev); - #if IS_ENABLED(CONFIG_AGP) if (drm->agp.bridge) { return ttm_agp_tt_populate(ttm); @@ -1553,16 +1518,6 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) dev = drm->dev; pdev = device->dev; - /* - * Objects matching this condition have been marked as force_coherent, - * so use the DMA API for them. - */ - if (!nvxx_device(&drm->device)->func->cpu_coherent && - ttm->caching_state == tt_uncached) { - ttm_dma_unpopulate(ttm_dma, dev->dev); - return; - } - #if IS_ENABLED(CONFIG_AGP) if (drm->agp.bridge) { ttm_agp_tt_unpopulate(ttm); diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c index b1d2527c5625..f9b3c811187e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_chan.c +++ b/drivers/gpu/drm/nouveau/nouveau_chan.c @@ -191,7 +191,8 @@ static int nouveau_channel_ind(struct nouveau_drm *drm, struct nvif_device *device, u32 engine, struct nouveau_channel **pchan) { - static const u16 oclasses[] = { MAXWELL_CHANNEL_GPFIFO_A, + static const u16 oclasses[] = { PASCAL_CHANNEL_GPFIFO_A, + MAXWELL_CHANNEL_GPFIFO_A, KEPLER_CHANNEL_GPFIFO_B, KEPLER_CHANNEL_GPFIFO_A, FERMI_CHANNEL_GPFIFO, diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 6072fe292db8..afbf557b23d4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -47,7 +47,7 @@ nouveau_display_vblank_handler(struct nvif_notify *notify) { struct nouveau_crtc *nv_crtc = container_of(notify, typeof(*nv_crtc), vblank); - drm_handle_vblank(nv_crtc->base.dev, nv_crtc->index); + drm_crtc_handle_vblank(&nv_crtc->base); return NVIF_NOTIFY_KEEP; } @@ -495,6 +495,8 @@ nouveau_display_create(struct drm_device *dev) if (nouveau_modeset != 2 && drm->vbios.dcb.entries) { static const u16 oclass[] = { + GP104_DISP, + GP100_DISP, GM200_DISP, GM107_DISP, GK110_DISP, @@ -554,6 +556,7 @@ nouveau_display_destroy(struct drm_device *dev) nouveau_display_vblank_fini(dev); drm_kms_helper_poll_fini(dev); + drm_crtc_force_disable_all(dev); drm_mode_config_cleanup(dev); if (disp->dtor) diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 295e7621cc68..66c1280c0f1f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -198,6 +198,7 @@ nouveau_accel_init(struct nouveau_drm *drm) case KEPLER_CHANNEL_GPFIFO_A: case 
KEPLER_CHANNEL_GPFIFO_B: case MAXWELL_CHANNEL_GPFIFO_A: + case PASCAL_CHANNEL_GPFIFO_A: ret = nvc0_fence_create(drm); break; default: @@ -316,7 +317,16 @@ static int nouveau_drm_probe(struct pci_dev *pdev, if (vga_switcheroo_client_probe_defer(pdev)) return -EPROBE_DEFER; - /* remove conflicting drivers (vesafb, efifb etc) */ + /* We need to check that the chipset is supported before booting + * fbdev off the hardware, as there's no way to put it back. + */ + ret = nvkm_device_pci_new(pdev, NULL, "error", true, false, 0, &device); + if (ret) + return ret; + + nvkm_device_del(&device); + + /* Remove conflicting drivers (vesafb, efifb etc). */ aper = alloc_apertures(3); if (!aper) return -ENOMEM; @@ -430,6 +440,11 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) nouveau_vga_init(drm); if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) { + if (!nvxx_device(&drm->device)->mmu) { + ret = -ENOSYS; + goto fail_device; + } + ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40), 0x1000, NULL, &drm->client.vm); if (ret) @@ -490,7 +505,11 @@ nouveau_drm_unload(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - pm_runtime_get_sync(dev->dev); + if (nouveau_runtime_pm != 0) { + pm_runtime_get_sync(dev->dev); + pm_runtime_forbid(dev->dev); + } + nouveau_fbcon_fini(dev); nouveau_accel_fini(drm); nouveau_hwmon_fini(dev); diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 57aaf98a26f9..d1f248fd3506 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -552,6 +552,8 @@ nouveau_fbcon_init(struct drm_device *dev) if (ret) goto fini; + if (fbcon->helper.fbdev) + fbcon->helper.fbdev->pixmap.buf_align = 4; return 0; fini: diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c index 1ff4166af26e..71f764bf4cc6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_hwmon.c +++ b/drivers/gpu/drm/nouveau/nouveau_hwmon.c @@ -535,6 +535,40 @@ static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, nouveau_hwmon_get_in0_input, NULL, 0); static ssize_t +nouveau_hwmon_get_in0_min(struct device *d, + struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvkm_volt *volt = nvxx_volt(&drm->device); + + if (!volt || !volt->min_uv) + return -ENODEV; + + return sprintf(buf, "%i\n", volt->min_uv / 1000); +} + +static SENSOR_DEVICE_ATTR(in0_min, S_IRUGO, + nouveau_hwmon_get_in0_min, NULL, 0); + +static ssize_t +nouveau_hwmon_get_in0_max(struct device *d, + struct device_attribute *a, char *buf) +{ + struct drm_device *dev = dev_get_drvdata(d); + struct nouveau_drm *drm = nouveau_drm(dev); + struct nvkm_volt *volt = nvxx_volt(&drm->device); + + if (!volt || !volt->max_uv) + return -ENODEV; + + return sprintf(buf, "%i\n", volt->max_uv / 1000); +} + +static SENSOR_DEVICE_ATTR(in0_max, S_IRUGO, + nouveau_hwmon_get_in0_max, NULL, 0); + +static ssize_t nouveau_hwmon_get_in0_label(struct device *d, struct device_attribute *a, char *buf) { @@ -594,6 +628,8 @@ static struct attribute *hwmon_pwm_fan_attributes[] = { static struct attribute *hwmon_in0_attributes[] = { &sensor_dev_attr_in0_input.dev_attr.attr, + &sensor_dev_attr_in0_min.dev_attr.attr, + &sensor_dev_attr_in0_max.dev_attr.attr, &sensor_dev_attr_in0_label.dev_attr.attr, NULL }; diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index bcee91497eb9..1825dbc33192 100644 --- 
a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -164,6 +164,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man, case NV_DEVICE_INFO_V0_FERMI: case NV_DEVICE_INFO_V0_KEPLER: case NV_DEVICE_INFO_V0_MAXWELL: + case NV_DEVICE_INFO_V0_PASCAL: node->memtype = (nvbo->tile_flags & 0xff00) >> 8; break; default: diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c index 0f3e4bb411cc..7d9248b8c664 100644 --- a/drivers/gpu/drm/nouveau/nv04_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c @@ -82,7 +82,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) uint32_t fg; uint32_t bg; uint32_t dsize; - uint32_t width; uint32_t *data = (uint32_t *)image->data; int ret; @@ -93,9 +92,6 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret) return ret; - width = ALIGN(image->width, 8); - dsize = ALIGN(width * image->height, 32) >> 5; - if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { fg = ((uint32_t *) info->pseudo_palette)[image->fg_color]; @@ -111,10 +107,11 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) ((image->dx + image->width) & 0xffff)); OUT_RING(chan, bg); OUT_RING(chan, fg); - OUT_RING(chan, (image->height << 16) | width); + OUT_RING(chan, (image->height << 16) | image->width); OUT_RING(chan, (image->height << 16) | image->width); OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff)); + dsize = ALIGN(image->width * image->height, 32) >> 5; while (dsize) { int iter_len = dsize > 128 ? 128 : dsize; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 7a7788212df7..7d0edcbcfca7 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -297,6 +297,8 @@ nv50_core_create(struct nvif_device *device, struct nvif_object *disp, .pushbuf = 0xb0007d00, }; static const s32 oclass[] = { + GP104_DISP_CORE_CHANNEL_DMA, + GP100_DISP_CORE_CHANNEL_DMA, GM200_DISP_CORE_CHANNEL_DMA, GM107_DISP_CORE_CHANNEL_DMA, GK110_DISP_CORE_CHANNEL_DMA, diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c index 33d9ee0fac40..1aeb698e9707 100644 --- a/drivers/gpu/drm/nouveau/nv50_fbcon.c +++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c @@ -95,7 +95,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) struct nouveau_fbdev *nfbdev = info->par; struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); struct nouveau_channel *chan = drm->channel; - uint32_t width, dwords, *data = (uint32_t *)image->data; + uint32_t dwords, *data = (uint32_t *)image->data; uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); uint32_t *palette = info->pseudo_palette; int ret; @@ -107,9 +107,6 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret) return ret; - width = ALIGN(image->width, 32); - dwords = (width * image->height) >> 5; - BEGIN_NV04(chan, NvSub2D, 0x0814, 2); if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { @@ -128,6 +125,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING(chan, 0); OUT_RING(chan, image->dy); + dwords = ALIGN(image->width * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 
2047 : dwords; diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c index a0913359ac05..839f4c8c1805 100644 --- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c +++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c @@ -95,7 +95,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) struct nouveau_fbdev *nfbdev = info->par; struct nouveau_drm *drm = nouveau_drm(nfbdev->dev); struct nouveau_channel *chan = drm->channel; - uint32_t width, dwords, *data = (uint32_t *)image->data; + uint32_t dwords, *data = (uint32_t *)image->data; uint32_t mask = ~(~0 >> (32 - info->var.bits_per_pixel)); uint32_t *palette = info->pseudo_palette; int ret; @@ -107,9 +107,6 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) if (ret) return ret; - width = ALIGN(image->width, 32); - dwords = (width * image->height) >> 5; - BEGIN_NVC0(chan, NvSub2D, 0x0814, 2); if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { @@ -128,6 +125,7 @@ nvc0_fbcon_imageblit(struct fb_info *info, const struct fb_image *image) OUT_RING (chan, 0); OUT_RING (chan, image->dy); + dwords = ALIGN(image->width * image->height, 32) >> 5; while (dwords) { int push = dwords > 2047 ? 2047 : dwords; diff --git a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c index b18557858f19..19044aba265e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/core/subdev.c +++ b/drivers/gpu/drm/nouveau/nvkm/core/subdev.c @@ -57,6 +57,9 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = { [NVKM_ENGINE_CE0 ] = "ce0", [NVKM_ENGINE_CE1 ] = "ce1", [NVKM_ENGINE_CE2 ] = "ce2", + [NVKM_ENGINE_CE3 ] = "ce3", + [NVKM_ENGINE_CE4 ] = "ce4", + [NVKM_ENGINE_CE5 ] = "ce5", [NVKM_ENGINE_CIPHER ] = "cipher", [NVKM_ENGINE_DISP ] = "disp", [NVKM_ENGINE_DMAOBJ ] = "dma", @@ -71,6 +74,7 @@ nvkm_subdev_name[NVKM_SUBDEV_NR] = { [NVKM_ENGINE_MSVLD ] = "msvld", [NVKM_ENGINE_NVENC0 ] = "nvenc0", [NVKM_ENGINE_NVENC1 ] = "nvenc1", + [NVKM_ENGINE_NVENC2 ] = "nvenc2", [NVKM_ENGINE_NVDEC ] = "nvdec", [NVKM_ENGINE_PM ] = "pm", [NVKM_ENGINE_SEC ] = "sec", @@ -105,7 +109,7 @@ nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) } } - nvkm_mc_reset(device->mc, subdev->index); + nvkm_mc_reset(device, subdev->index); time = ktime_to_us(ktime_get()) - time; nvkm_trace(subdev, "%s completed in %lldus\n", action, time); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild index 9c19d59b47df..a4458a8eb30a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/Kbuild @@ -3,3 +3,5 @@ nvkm-y += nvkm/engine/ce/gf100.o nvkm-y += nvkm/engine/ce/gk104.o nvkm-y += nvkm/engine/ce/gm107.o nvkm-y += nvkm/engine/ce/gm200.o +nvkm-y += nvkm/engine/ce/gp100.o +nvkm-y += nvkm/engine/ce/gp104.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c new file mode 100644 index 000000000000..c7710456bc30 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp100.c @@ -0,0 +1,102 @@ +/* + * Copyright 2015 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include "priv.h" +#include <core/enum.h> + +#include <nvif/class.h> + +static const struct nvkm_enum +gp100_ce_launcherr_report[] = { + { 0x0, "NO_ERR" }, + { 0x1, "2D_LAYER_EXCEEDS_DEPTH" }, + { 0x2, "INVALID_ALIGNMENT" }, + { 0x3, "MEM2MEM_RECT_OUT_OF_BOUNDS" }, + { 0x4, "SRC_LINE_EXCEEDS_PITCH" }, + { 0x5, "SRC_LINE_EXCEEDS_NEG_PITCH" }, + { 0x6, "DST_LINE_EXCEEDS_PITCH" }, + { 0x7, "DST_LINE_EXCEEDS_NEG_PITCH" }, + { 0x8, "BAD_SRC_PIXEL_COMP_REF" }, + { 0x9, "INVALID_VALUE" }, + { 0xa, "UNUSED_FIELD" }, + { 0xb, "INVALID_OPERATION" }, + { 0xc, "NO_RESOURCES" }, + { 0xd, "INVALID_CONFIG" }, + {} +}; + +static void +gp100_ce_intr_launcherr(struct nvkm_engine *ce, const u32 base) +{ + struct nvkm_subdev *subdev = &ce->subdev; + struct nvkm_device *device = subdev->device; + u32 stat = nvkm_rd32(device, 0x104418 + base); + const struct nvkm_enum *en = + nvkm_enum_find(gp100_ce_launcherr_report, stat & 0x0000000f); + nvkm_warn(subdev, "LAUNCHERR %08x [%s]\n", stat, en ? 
en->name : ""); +} + +void +gp100_ce_intr(struct nvkm_engine *ce) +{ + const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x80; + struct nvkm_subdev *subdev = &ce->subdev; + struct nvkm_device *device = subdev->device; + u32 mask = nvkm_rd32(device, 0x10440c + base); + u32 intr = nvkm_rd32(device, 0x104410 + base) & mask; + if (intr & 0x00000001) { //XXX: guess + nvkm_warn(subdev, "BLOCKPIPE\n"); + nvkm_wr32(device, 0x104410 + base, 0x00000001); + intr &= ~0x00000001; + } + if (intr & 0x00000002) { //XXX: guess + nvkm_warn(subdev, "NONBLOCKPIPE\n"); + nvkm_wr32(device, 0x104410 + base, 0x00000002); + intr &= ~0x00000002; + } + if (intr & 0x00000004) { + gp100_ce_intr_launcherr(ce, base); + nvkm_wr32(device, 0x104410 + base, 0x00000004); + intr &= ~0x00000004; + } + if (intr) { + nvkm_warn(subdev, "intr %08x\n", intr); + nvkm_wr32(device, 0x104410 + base, intr); + } +} + +static const struct nvkm_engine_func +gp100_ce = { + .intr = gp100_ce_intr, + .sclass = { + { -1, -1, PASCAL_DMA_COPY_A }, + {} + } +}; + +int +gp100_ce_new(struct nvkm_device *device, int index, + struct nvkm_engine **pengine) +{ + return nvkm_engine_new_(&gp100_ce, device, index, true, pengine); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c new file mode 100644 index 000000000000..20e019788a53 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/gp104.c @@ -0,0 +1,44 @@ +/* + * Copyright 2015 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#include "priv.h" +#include <core/enum.h> + +#include <nvif/class.h> + +static const struct nvkm_engine_func +gp104_ce = { + .intr = gp100_ce_intr, + .sclass = { + { -1, -1, PASCAL_DMA_COPY_B }, + { -1, -1, PASCAL_DMA_COPY_A }, + {} + } +}; + +int +gp104_ce_new(struct nvkm_device *device, int index, + struct nvkm_engine **pengine) +{ + return nvkm_engine_new_(&gp104_ce, device, index, true, pengine); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h index e2fa8b161943..2dce405976ad 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/ce/priv.h @@ -4,4 +4,5 @@ void gt215_ce_intr(struct nvkm_falcon *, struct nvkm_fifo_chan *); void gk104_ce_intr(struct nvkm_engine *); +void gp100_ce_intr(struct nvkm_engine *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c index 4572debcb0c9..7218a067a6c5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c @@ -2148,6 +2148,67 @@ nv12b_chipset = { .sw = gf100_sw_new, }; +static const struct nvkm_device_chip +nv130_chipset = { + .name = "GP100", + .bar = gf100_bar_new, + .bios = nvkm_bios_new, + .bus = gf100_bus_new, + .devinit = gm200_devinit_new, + .fb = gp100_fb_new, + .fuse = gm107_fuse_new, + .gpio = gk104_gpio_new, + .i2c = gm200_i2c_new, + .ibus = gm200_ibus_new, + .imem = nv50_instmem_new, + .ltc = gp100_ltc_new, + .mc = gp100_mc_new, + .mmu = gf100_mmu_new, + .secboot = gm200_secboot_new, + .pci = gp100_pci_new, + .timer = gk20a_timer_new, + .top = gk104_top_new, + .ce[0] = gp100_ce_new, + .ce[1] = gp100_ce_new, + .ce[2] = gp100_ce_new, + .ce[3] = gp100_ce_new, + .ce[4] = gp100_ce_new, + .ce[5] = gp100_ce_new, + .dma = gf119_dma_new, + .disp = gp100_disp_new, + .fifo = gp100_fifo_new, + .gr = gp100_gr_new, + .sw = gf100_sw_new, +}; + +static const struct nvkm_device_chip +nv134_chipset = { + .name = "GP104", + .bar = gf100_bar_new, + .bios = nvkm_bios_new, + .bus = gf100_bus_new, + .devinit = gm200_devinit_new, + .fb = gp104_fb_new, + .fuse = gm107_fuse_new, + .gpio = gk104_gpio_new, + .i2c = gm200_i2c_new, + .ibus = gm200_ibus_new, + .imem = nv50_instmem_new, + .ltc = gp100_ltc_new, + .mc = gp100_mc_new, + .mmu = gf100_mmu_new, + .pci = gp100_pci_new, + .timer = gk20a_timer_new, + .top = gk104_top_new, + .ce[0] = gp104_ce_new, + .ce[1] = gp104_ce_new, + .ce[2] = gp104_ce_new, + .ce[3] = gp104_ce_new, + .disp = gp104_disp_new, + .dma = gf119_dma_new, + .fifo = gp100_fifo_new, +}; + static int nvkm_device_event_ctor(struct nvkm_object *object, void *data, u32 size, struct nvkm_notify *notify) @@ -2221,6 +2282,9 @@ nvkm_device_engine(struct nvkm_device *device, int index) _(CE0 , device->ce[0] , device->ce[0]); _(CE1 , device->ce[1] , device->ce[1]); _(CE2 , device->ce[2] , device->ce[2]); + _(CE3 , device->ce[3] , device->ce[3]); + _(CE4 , device->ce[4] , device->ce[4]); + _(CE5 , device->ce[5] , device->ce[5]); _(CIPHER , device->cipher , device->cipher); _(DISP , device->disp , &device->disp->engine); _(DMAOBJ , device->dma , &device->dma->engine); @@ -2235,6 +2299,7 @@ nvkm_device_engine(struct nvkm_device *device, int index) _(MSVLD , device->msvld , device->msvld); _(NVENC0 , device->nvenc[0], device->nvenc[0]); _(NVENC1 , device->nvenc[1], device->nvenc[1]); + _(NVENC2 , device->nvenc[2], device->nvenc[2]); _(NVDEC , device->nvdec , device->nvdec); _(PM , device->pm , 
&device->pm->engine); _(SEC , device->sec , device->sec); @@ -2492,6 +2557,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x100: device->card_type = NV_E0; break; case 0x110: case 0x120: device->card_type = GM100; break; + case 0x130: device->card_type = GP100; break; default: break; } @@ -2576,6 +2642,8 @@ nvkm_device_ctor(const struct nvkm_device_func *func, case 0x124: device->chip = &nv124_chipset; break; case 0x126: device->chip = &nv126_chipset; break; case 0x12b: device->chip = &nv12b_chipset; break; + case 0x130: device->chip = &nv130_chipset; break; + case 0x134: device->chip = &nv134_chipset; break; default: nvdev_error(device, "unknown chipset (%08x)\n", boot0); goto done; @@ -2659,6 +2727,9 @@ nvkm_device_ctor(const struct nvkm_device_func *func, _(NVKM_ENGINE_CE0 , ce[0]); _(NVKM_ENGINE_CE1 , ce[1]); _(NVKM_ENGINE_CE2 , ce[2]); + _(NVKM_ENGINE_CE3 , ce[3]); + _(NVKM_ENGINE_CE4 , ce[4]); + _(NVKM_ENGINE_CE5 , ce[5]); _(NVKM_ENGINE_CIPHER , cipher); _(NVKM_ENGINE_DISP , disp); _(NVKM_ENGINE_DMAOBJ , dma); @@ -2673,6 +2744,7 @@ nvkm_device_ctor(const struct nvkm_device_func *func, _(NVKM_ENGINE_MSVLD , msvld); _(NVKM_ENGINE_NVENC0 , nvenc[0]); _(NVKM_ENGINE_NVENC1 , nvenc[1]); + _(NVKM_ENGINE_NVENC2 , nvenc[2]); _(NVKM_ENGINE_NVDEC , nvdec); _(NVKM_ENGINE_PM , pm); _(NVKM_ENGINE_SEC , sec); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c index 18fab3973ce5..b1b693219db3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/pci.c @@ -1614,7 +1614,6 @@ nvkm_device_pci_func = { .fini = nvkm_device_pci_fini, .resource_addr = nvkm_device_pci_resource_addr, .resource_size = nvkm_device_pci_resource_size, - .cpu_coherent = !IS_ENABLED(CONFIG_ARM) && !IS_ENABLED(CONFIG_ARM64), }; int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c index ec12efb4689a..939682f18788 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c @@ -191,13 +191,11 @@ static irqreturn_t nvkm_device_tegra_intr(int irq, void *arg) { struct nvkm_device_tegra *tdev = arg; - struct nvkm_mc *mc = tdev->device.mc; + struct nvkm_device *device = &tdev->device; bool handled = false; - if (likely(mc)) { - nvkm_mc_intr_unarm(mc); - nvkm_mc_intr(mc, &handled); - nvkm_mc_intr_rearm(mc); - } + nvkm_mc_intr_unarm(device); + nvkm_mc_intr(device, &handled); + nvkm_mc_intr_rearm(device); return handled ? 
IRQ_HANDLED : IRQ_NONE; } @@ -247,7 +245,6 @@ nvkm_device_tegra_func = { .fini = nvkm_device_tegra_fini, .resource_addr = nvkm_device_tegra_resource_addr, .resource_size = nvkm_device_tegra_resource_size, - .cpu_coherent = false, }; int @@ -313,6 +310,7 @@ nvkm_device_tegra_new(const struct nvkm_device_tegra_func *func, goto remove; tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value; + tdev->gpu_speedo_id = tegra_sku_info.gpu_speedo_id; ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev, NVKM_DEVICE_TEGRA, pdev->id, NULL, cfg, dbg, detect, mmio, subdev_mask, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c index 137066426ed7..79a8f71cf788 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/user.c @@ -102,6 +102,7 @@ nvkm_udevice_info(struct nvkm_udevice *udev, void *data, u32 size) case NV_C0: args->v0.family = NV_DEVICE_INFO_V0_FERMI; break; case NV_E0: args->v0.family = NV_DEVICE_INFO_V0_KEPLER; break; case GM100: args->v0.family = NV_DEVICE_INFO_V0_MAXWELL; break; + case GP100: args->v0.family = NV_DEVICE_INFO_V0_PASCAL; break; default: args->v0.family = 0; break; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild index a74c5dd27dc0..77a52b54a31e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/Kbuild @@ -10,6 +10,8 @@ nvkm-y += nvkm/engine/disp/gk104.o nvkm-y += nvkm/engine/disp/gk110.o nvkm-y += nvkm/engine/disp/gm107.o nvkm-y += nvkm/engine/disp/gm200.o +nvkm-y += nvkm/engine/disp/gp100.o +nvkm-y += nvkm/engine/disp/gp104.o nvkm-y += nvkm/engine/disp/outp.o nvkm-y += nvkm/engine/disp/outpdp.o @@ -18,6 +20,7 @@ nvkm-y += nvkm/engine/disp/piornv50.o nvkm-y += nvkm/engine/disp/sornv50.o nvkm-y += nvkm/engine/disp/sorg94.o nvkm-y += nvkm/engine/disp/sorgf119.o +nvkm-y += nvkm/engine/disp/sorgm107.o nvkm-y += nvkm/engine/disp/sorgm200.o nvkm-y += nvkm/engine/disp/dport.o @@ -44,12 +47,15 @@ nvkm-y += nvkm/engine/disp/rootgk104.o nvkm-y += nvkm/engine/disp/rootgk110.o nvkm-y += nvkm/engine/disp/rootgm107.o nvkm-y += nvkm/engine/disp/rootgm200.o +nvkm-y += nvkm/engine/disp/rootgp100.o +nvkm-y += nvkm/engine/disp/rootgp104.o nvkm-y += nvkm/engine/disp/channv50.o nvkm-y += nvkm/engine/disp/changf119.o nvkm-y += nvkm/engine/disp/dmacnv50.o nvkm-y += nvkm/engine/disp/dmacgf119.o +nvkm-y += nvkm/engine/disp/dmacgp104.o nvkm-y += nvkm/engine/disp/basenv50.o nvkm-y += nvkm/engine/disp/baseg84.o @@ -58,6 +64,7 @@ nvkm-y += nvkm/engine/disp/basegt215.o nvkm-y += nvkm/engine/disp/basegf119.o nvkm-y += nvkm/engine/disp/basegk104.o nvkm-y += nvkm/engine/disp/basegk110.o +nvkm-y += nvkm/engine/disp/basegp104.o nvkm-y += nvkm/engine/disp/corenv50.o nvkm-y += nvkm/engine/disp/coreg84.o @@ -69,6 +76,8 @@ nvkm-y += nvkm/engine/disp/coregk104.o nvkm-y += nvkm/engine/disp/coregk110.o nvkm-y += nvkm/engine/disp/coregm107.o nvkm-y += nvkm/engine/disp/coregm200.o +nvkm-y += nvkm/engine/disp/coregp100.o +nvkm-y += nvkm/engine/disp/coregp104.o nvkm-y += nvkm/engine/disp/ovlynv50.o nvkm-y += nvkm/engine/disp/ovlyg84.o @@ -76,6 +85,7 @@ nvkm-y += nvkm/engine/disp/ovlygt200.o nvkm-y += nvkm/engine/disp/ovlygt215.o nvkm-y += nvkm/engine/disp/ovlygf119.o nvkm-y += nvkm/engine/disp/ovlygk104.o +nvkm-y += nvkm/engine/disp/ovlygp104.o nvkm-y += nvkm/engine/disp/piocnv50.o nvkm-y += nvkm/engine/disp/piocgf119.o diff --git 
a/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c new file mode 100644 index 000000000000..51688e37c54e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/basegp104.c @@ -0,0 +1,38 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "dmacnv50.h" +#include "rootnv50.h" + +#include <nvif/class.h> + +const struct nv50_disp_dmac_oclass +gp104_disp_base_oclass = { + .base.oclass = GK110_DISP_BASE_CHANNEL_DMA, + .base.minver = 0, + .base.maxver = 0, + .ctor = nv50_disp_base_new, + .func = &gp104_disp_dmac_func, + .mthd = &gf119_disp_base_chan_mthd, + .chid = 1, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h index aee374884c96..f5f683d9fd20 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/channv50.h @@ -85,6 +85,7 @@ extern const struct nv50_disp_mthd_list gf119_disp_core_mthd_pior; extern const struct nv50_disp_chan_mthd gf119_disp_base_chan_mthd; extern const struct nv50_disp_chan_mthd gk104_disp_core_chan_mthd; +extern const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd; struct nv50_disp_pioc_oclass { int (*ctor)(const struct nv50_disp_chan_func *, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c index 6b1dc703dac7..21fbf89b6319 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregf119.c @@ -171,7 +171,7 @@ gf119_disp_core_chan_mthd = { } }; -static void +void gf119_disp_core_fini(struct nv50_disp_dmac *chan) { struct nv50_disp *disp = chan->base.root->disp; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c new file mode 100644 index 000000000000..d5dff6619d4d --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp100.c @@ -0,0 +1,38 @@ +/* + * Copyright 2015 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "dmacnv50.h" +#include "rootnv50.h" + +#include <nvif/class.h> + +const struct nv50_disp_dmac_oclass +gp100_disp_core_oclass = { + .base.oclass = GP100_DISP_CORE_CHANNEL_DMA, + .base.minver = 0, + .base.maxver = 0, + .ctor = nv50_disp_core_new, + .func = &gf119_disp_core_func, + .mthd = &gk104_disp_core_chan_mthd, + .chid = 0, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c new file mode 100644 index 000000000000..6922f4007b61 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/coregp104.c @@ -0,0 +1,78 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "dmacnv50.h" +#include "rootnv50.h" + +#include <subdev/timer.h> + +#include <nvif/class.h> + +static int +gp104_disp_core_init(struct nv50_disp_dmac *chan) +{ + struct nv50_disp *disp = chan->base.root->disp; + struct nvkm_subdev *subdev = &disp->base.engine.subdev; + struct nvkm_device *device = subdev->device; + + /* enable error reporting */ + nvkm_mask(device, 0x6100a0, 0x00000001, 0x00000001); + + /* initialise channel for dma command submission */ + nvkm_wr32(device, 0x611494, chan->push); + nvkm_wr32(device, 0x611498, 0x00010000); + nvkm_wr32(device, 0x61149c, 0x00000001); + nvkm_mask(device, 0x610490, 0x00000010, 0x00000010); + nvkm_wr32(device, 0x640000, 0x00000000); + nvkm_wr32(device, 0x610490, 0x01000013); + + /* wait for it to go inactive */ + if (nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x610490) & 0x80000000)) + break; + ) < 0) { + nvkm_error(subdev, "core init: %08x\n", + nvkm_rd32(device, 0x610490)); + return -EBUSY; + } + + return 0; +} + +const struct nv50_disp_dmac_func +gp104_disp_core_func = { + .init = gp104_disp_core_init, + .fini = gf119_disp_core_fini, + .bind = gf119_disp_dmac_bind, +}; + +const struct nv50_disp_dmac_oclass +gp104_disp_core_oclass = { + .base.oclass = GP104_DISP_CORE_CHANNEL_DMA, + .base.minver = 0, + .base.maxver = 0, + .ctor = nv50_disp_core_new, + .func = &gp104_disp_core_func, + .mthd = &gk104_disp_core_chan_mthd, + .chid = 0, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c index 876b14549a58..a57f7cef307a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgf119.c @@ -36,7 +36,7 @@ gf119_disp_dmac_bind(struct nv50_disp_dmac *chan, chan->base.chid << 27 | 0x00000001); } -static void +void gf119_disp_dmac_fini(struct nv50_disp_dmac *chan) { struct nv50_disp *disp = chan->base.root->disp; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c new file mode 100644 index 000000000000..ad24c2c57696 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacgp104.c @@ -0,0 +1,66 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "dmacnv50.h" +#include "rootnv50.h" + +#include <subdev/timer.h> + +static int +gp104_disp_dmac_init(struct nv50_disp_dmac *chan) +{ + struct nv50_disp *disp = chan->base.root->disp; + struct nvkm_subdev *subdev = &disp->base.engine.subdev; + struct nvkm_device *device = subdev->device; + int chid = chan->base.chid; + + /* enable error reporting */ + nvkm_mask(device, 0x6100a0, 0x00000001 << chid, 0x00000001 << chid); + + /* initialise channel for dma command submission */ + nvkm_wr32(device, 0x611494 + (chid * 0x0010), chan->push); + nvkm_wr32(device, 0x611498 + (chid * 0x0010), 0x00010000); + nvkm_wr32(device, 0x61149c + (chid * 0x0010), 0x00000001); + nvkm_mask(device, 0x610490 + (chid * 0x0010), 0x00000010, 0x00000010); + nvkm_wr32(device, 0x640000 + (chid * 0x1000), 0x00000000); + nvkm_wr32(device, 0x610490 + (chid * 0x0010), 0x00000013); + + /* wait for it to go inactive */ + if (nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x610490 + (chid * 0x10)) & 0x80000000)) + break; + ) < 0) { + nvkm_error(subdev, "ch %d init: %08x\n", chid, + nvkm_rd32(device, 0x610490 + (chid * 0x10))); + return -EBUSY; + } + + return 0; +} + +const struct nv50_disp_dmac_func +gp104_disp_dmac_func = { + .init = gp104_disp_dmac_init, + .fini = gf119_disp_dmac_fini, + .bind = gf119_disp_dmac_bind, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h index fc84eb8b5c45..43ac05857853 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dmacnv50.h @@ -25,8 +25,12 @@ int nv50_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32); extern const struct nv50_disp_dmac_func nv50_disp_core_func; extern const struct nv50_disp_dmac_func gf119_disp_dmac_func; +void gf119_disp_dmac_fini(struct nv50_disp_dmac *); int gf119_disp_dmac_bind(struct nv50_disp_dmac *, struct nvkm_object *, u32); extern const struct nv50_disp_dmac_func gf119_disp_core_func; +void gf119_disp_core_fini(struct nv50_disp_dmac *); + +extern const struct nv50_disp_dmac_func gp104_disp_dmac_func; struct nv50_disp_dmac_oclass { int (*ctor)(const struct nv50_disp_dmac_func *, @@ -88,4 +92,10 @@ extern const struct nv50_disp_dmac_oclass gk110_disp_base_oclass; extern const struct nv50_disp_dmac_oclass gm107_disp_core_oclass; extern const struct nv50_disp_dmac_oclass gm200_disp_core_oclass; + +extern const struct nv50_disp_dmac_oclass gp100_disp_core_oclass; + +extern const struct nv50_disp_dmac_oclass gp104_disp_core_oclass; +extern const struct nv50_disp_dmac_oclass gp104_disp_base_oclass; +extern const struct nv50_disp_dmac_oclass gp104_disp_ovly_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c index f0314664349c..29e84b241cca 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf119.c @@ -79,8 +79,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl, list_for_each_entry(outp, &disp->base.outp, head) { if ((outp->info.hasht & 0xff) == type && (outp->info.hashm & mask) == mask) { - *data = nvbios_outp_match(bios, outp->info.hasht, - outp->info.hashm, + *data = nvbios_outp_match(bios, outp->info.hasht, mask, ver, hdr, cnt, len, info); if (!*data) return NULL; @@ -155,25 +154,21 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) if (!outp) return NULL; + *conf = (ctrl & 
0x00000f00) >> 8; switch (outp->info.type) { case DCB_OUTPUT_TMDS: - *conf = (ctrl & 0x00000f00) >> 8; if (*conf == 5) *conf |= 0x0100; break; case DCB_OUTPUT_LVDS: - *conf = disp->sor.lvdsconf; + *conf |= disp->sor.lvdsconf; break; - case DCB_OUTPUT_DP: - *conf = (ctrl & 0x00000f00) >> 8; - break; - case DCB_OUTPUT_ANALOG: default: - *conf = 0x00ff; break; } - data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); + data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8, + &ver, &hdr, &cnt, &len, &info2); if (data && id < 0xff) { data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); if (data) { @@ -418,7 +413,7 @@ gf119_disp_intr_supervisor(struct work_struct *work) nvkm_wr32(device, 0x6101d0, 0x80000000); } -static void +void gf119_disp_intr_error(struct nv50_disp *disp, int chid) { struct nvkm_subdev *subdev = &disp->base.engine.subdev; @@ -466,7 +461,7 @@ gf119_disp_intr(struct nv50_disp *disp) u32 stat = nvkm_rd32(device, 0x61009c); int chid = ffs(stat) - 1; if (chid >= 0) - gf119_disp_intr_error(disp, chid); + disp->func->intr_error(disp, chid); intr &= ~0x00000002; } @@ -510,6 +505,7 @@ gf119_disp_new_(const struct nv50_disp_func *func, struct nvkm_device *device, static const struct nv50_disp_func gf119_disp = { .intr = gf119_disp_intr, + .intr_error = gf119_disp_intr_error, .uevent = &gf119_disp_chan_uevent, .super = gf119_disp_intr_supervisor, .root = &gf119_disp_root_oclass, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c index a86384b8e388..37f145cf30d7 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk104.c @@ -27,6 +27,7 @@ static const struct nv50_disp_func gk104_disp = { .intr = gf119_disp_intr, + .intr_error = gf119_disp_intr_error, .uevent = &gf119_disp_chan_uevent, .super = gf119_disp_intr_supervisor, .root = &gk104_disp_root_oclass, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c index 0d574c7e594a..e14ac946608c 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gk110.c @@ -27,6 +27,7 @@ static const struct nv50_disp_func gk110_disp = { .intr = gf119_disp_intr, + .intr_error = gf119_disp_intr_error, .uevent = &gf119_disp_chan_uevent, .super = gf119_disp_intr_supervisor, .root = &gk110_disp_root_oclass, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c index b6944142d616..2f2437cc5891 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm107.c @@ -27,6 +27,7 @@ static const struct nv50_disp_func gm107_disp = { .intr = gf119_disp_intr, + .intr_error = gf119_disp_intr_error, .uevent = &gf119_disp_chan_uevent, .super = gf119_disp_intr_supervisor, .root = &gm107_disp_root_oclass, @@ -36,7 +37,7 @@ gm107_disp = { .outp.internal.crt = nv50_dac_output_new, .outp.internal.tmds = nv50_sor_output_new, .outp.internal.lvds = nv50_sor_output_new, - .outp.internal.dp = gf119_sor_dp_new, + .outp.internal.dp = gm107_sor_dp_new, .dac.nr = 3, .dac.power = nv50_dac_power, .dac.sense = nv50_dac_sense, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c index 67eec8620719..9f368d4ee61e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gm200.c @@ -27,6 +27,7 @@ static const struct 
nv50_disp_func gm200_disp = { .intr = gf119_disp_intr, + .intr_error = gf119_disp_intr_error, .uevent = &gf119_disp_chan_uevent, .super = gf119_disp_intr_supervisor, .root = &gm200_disp_root_oclass, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c new file mode 100644 index 000000000000..4f81bf31435e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp100.c @@ -0,0 +1,55 @@ +/* + * Copyright 2015 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "nv50.h" +#include "rootnv50.h" + +static const struct nv50_disp_func +gp100_disp = { + .intr = gf119_disp_intr, + .intr_error = gf119_disp_intr_error, + .uevent = &gf119_disp_chan_uevent, + .super = gf119_disp_intr_supervisor, + .root = &gp100_disp_root_oclass, + .head.vblank_init = gf119_disp_vblank_init, + .head.vblank_fini = gf119_disp_vblank_fini, + .head.scanoutpos = gf119_disp_root_scanoutpos, + .outp.internal.crt = nv50_dac_output_new, + .outp.internal.tmds = nv50_sor_output_new, + .outp.internal.lvds = nv50_sor_output_new, + .outp.internal.dp = gm200_sor_dp_new, + .dac.nr = 3, + .dac.power = nv50_dac_power, + .dac.sense = nv50_dac_sense, + .sor.nr = 4, + .sor.power = nv50_sor_power, + .sor.hda_eld = gf119_hda_eld, + .sor.hdmi = gk104_hdmi_ctrl, + .sor.magic = gm200_sor_magic, +}; + +int +gp100_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +{ + return gf119_disp_new_(&gp100_disp, device, index, pdisp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c new file mode 100644 index 000000000000..3bf3380336e4 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gp104.c @@ -0,0 +1,81 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
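The string of one-line .intr_error additions in these hunks is the whole point of the refactor: gf119_disp_intr() used to call gf119_disp_intr_error() directly, and now dispatches through disp->func->intr_error, so GP104 can plug in its own decoder (the 0x6111f0-based one added later in this series) while every older chip keeps the gf119 version. A rough sketch of that direct-call-to-hook conversion; struct disp, disp_func and the two handlers are invented names:

#include <stdio.h>

struct disp;

struct disp_func {
	void (*intr_error)(struct disp *, int chid);
};

struct disp {
	const struct disp_func *func;
};

static void gf119_style_error(struct disp *disp, int chid)
{
	(void)disp;
	printf("gf119-style decode, chid %d\n", chid);
}

static void gp104_style_error(struct disp *disp, int chid)
{
	(void)disp;
	printf("gp104-style decode, chid %d\n", chid);
}

/* Shared interrupt path: no chipset checks, just the hook. */
static void disp_intr(struct disp *disp, int chid)
{
	disp->func->intr_error(disp, chid);
}

static const struct disp_func gf119_disp_sketch = { .intr_error = gf119_style_error };
static const struct disp_func gp104_disp_sketch = { .intr_error = gp104_style_error };

int main(void)
{
	struct disp older = { &gf119_disp_sketch }, pascal = { &gp104_disp_sketch };
	disp_intr(&older, 1);
	disp_intr(&pascal, 1);
	return 0;
}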
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "nv50.h" +#include "rootnv50.h" + +static void +gp104_disp_intr_error(struct nv50_disp *disp, int chid) +{ + struct nvkm_subdev *subdev = &disp->base.engine.subdev; + struct nvkm_device *device = subdev->device; + u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12)); + u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12)); + u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12)); + + nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n", + chid, (mthd & 0x0000ffc), data, mthd, unkn); + + if (chid < ARRAY_SIZE(disp->chan)) { + switch (mthd & 0xffc) { + case 0x0080: + nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR); + break; + default: + break; + } + } + + nvkm_wr32(device, 0x61009c, (1 << chid)); + nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000); +} + +static const struct nv50_disp_func +gp104_disp = { + .intr = gf119_disp_intr, + .intr_error = gp104_disp_intr_error, + .uevent = &gf119_disp_chan_uevent, + .super = gf119_disp_intr_supervisor, + .root = &gp104_disp_root_oclass, + .head.vblank_init = gf119_disp_vblank_init, + .head.vblank_fini = gf119_disp_vblank_fini, + .head.scanoutpos = gf119_disp_root_scanoutpos, + .outp.internal.crt = nv50_dac_output_new, + .outp.internal.tmds = nv50_sor_output_new, + .outp.internal.lvds = nv50_sor_output_new, + .outp.internal.dp = gm200_sor_dp_new, + .dac.nr = 3, + .dac.power = nv50_dac_power, + .dac.sense = nv50_dac_sense, + .sor.nr = 4, + .sor.power = nv50_sor_power, + .sor.hda_eld = gf119_hda_eld, + .sor.hdmi = gk104_hdmi_ctrl, + .sor.magic = gm200_sor_magic, +}; + +int +gp104_disp_new(struct nvkm_device *device, int index, struct nvkm_disp **pdisp) +{ + return gf119_disp_new_(&gp104_disp, device, index, pdisp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c index 4226d2153b9c..fbb8c7dc18fd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.c @@ -32,6 +32,7 @@ #include <subdev/bios/init.h> #include <subdev/bios/pll.h> #include <subdev/devinit.h> +#include <subdev/timer.h> static const struct nvkm_disp_oclass * nv50_disp_root_(struct nvkm_disp *base) @@ -269,8 +270,7 @@ exec_lookup(struct nv50_disp *disp, int head, int or, u32 ctrl, list_for_each_entry(outp, &disp->base.outp, head) { if ((outp->info.hasht & 0xff) == type && (outp->info.hashm & mask) == mask) { - *data = nvbios_outp_match(bios, outp->info.hasht, - outp->info.hashm, + *data = nvbios_outp_match(bios, outp->info.hasht, mask, ver, hdr, cnt, len, info); if (!*data) return NULL; @@ -387,22 +387,17 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) if (!outp) return NULL; + *conf = (ctrl & 0x00000f00) >> 8; if (outp->info.location == 0) { switch (outp->info.type) { case DCB_OUTPUT_TMDS: - *conf = (ctrl & 0x00000f00) >> 8; if (*conf == 5) *conf |= 0x0100; break; case DCB_OUTPUT_LVDS: - *conf = disp->sor.lvdsconf; + *conf |= disp->sor.lvdsconf; break; - case DCB_OUTPUT_DP: - 
*conf = (ctrl & 0x00000f00) >> 8; - break; - case DCB_OUTPUT_ANALOG: default: - *conf = 0x00ff; break; } } else { @@ -410,7 +405,8 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) pclk = pclk / 2; } - data = nvbios_ocfg_match(bios, data, *conf, &ver, &hdr, &cnt, &len, &info2); + data = nvbios_ocfg_match(bios, data, *conf & 0xff, *conf >> 8, + &ver, &hdr, &cnt, &len, &info2); if (data && id < 0xff) { data = nvbios_oclk_match(bios, info2.clkcmp[id], pclk); if (data) { @@ -430,6 +426,134 @@ exec_clkcmp(struct nv50_disp *disp, int head, int id, u32 pclk, u32 *conf) return outp; } +static bool +nv50_disp_dptmds_war(struct nvkm_device *device) +{ + switch (device->chipset) { + case 0x94: + case 0x96: + case 0x98: + case 0xaa: + case 0xac: + return true; + default: + break; + } + return false; +} + +static bool +nv50_disp_dptmds_war_needed(struct nv50_disp *disp, struct dcb_output *outp) +{ + struct nvkm_device *device = disp->base.engine.subdev.device; + const u32 soff = __ffs(outp->or) * 0x800; + if (nv50_disp_dptmds_war(device) && outp->type == DCB_OUTPUT_TMDS) { + switch (nvkm_rd32(device, 0x614300 + soff) & 0x00030000) { + case 0x00000000: + case 0x00030000: + return true; + default: + break; + } + } + return false; + +} + +static void +nv50_disp_dptmds_war_2(struct nv50_disp *disp, struct dcb_output *outp) +{ + struct nvkm_device *device = disp->base.engine.subdev.device; + const u32 soff = __ffs(outp->or) * 0x800; + + if (!nv50_disp_dptmds_war_needed(disp, outp)) + return; + + nvkm_mask(device, 0x00e840, 0x80000000, 0x80000000); + nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x03000000); + nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000001); + + nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x00000000); + nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x14000000); + nvkm_usec(device, 400, NVKM_DELAY); + nvkm_mask(device, 0x61c008 + soff, 0xff000000, 0x00000000); + nvkm_mask(device, 0x61c00c + soff, 0x0f000000, 0x01000000); + + if (nvkm_rd32(device, 0x61c004 + soff) & 0x00000001) { + u32 seqctl = nvkm_rd32(device, 0x61c030 + soff); + u32 pu_pc = seqctl & 0x0000000f; + nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f008000); + } +} + +static void +nv50_disp_dptmds_war_3(struct nv50_disp *disp, struct dcb_output *outp) +{ + struct nvkm_device *device = disp->base.engine.subdev.device; + const u32 soff = __ffs(outp->or) * 0x800; + u32 sorpwr; + + if (!nv50_disp_dptmds_war_needed(disp, outp)) + return; + + sorpwr = nvkm_rd32(device, 0x61c004 + soff); + if (sorpwr & 0x00000001) { + u32 seqctl = nvkm_rd32(device, 0x61c030 + soff); + u32 pd_pc = (seqctl & 0x00000f00) >> 8; + u32 pu_pc = seqctl & 0x0000000f; + + nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x1f008000); + + nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000)) + break; + ); + nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000000); + nvkm_msec(device, 2000, + if (!(nvkm_rd32(device, 0x61c030 + soff) & 0x10000000)) + break; + ); + + nvkm_wr32(device, 0x61c040 + soff + pd_pc * 4, 0x00002000); + nvkm_wr32(device, 0x61c040 + soff + pu_pc * 4, 0x1f000000); + } + + nvkm_mask(device, 0x61c10c + soff, 0x00000001, 0x00000000); + nvkm_mask(device, 0x614300 + soff, 0x03000000, 0x00000000); + + if (sorpwr & 0x00000001) { + nvkm_mask(device, 0x61c004 + soff, 0x80000001, 0x80000001); + } +} + +static void +nv50_disp_update_sppll1(struct nv50_disp *disp) +{ + struct nvkm_device *device = disp->base.engine.subdev.device; + bool used = false; + int sor; + + if 
(!nv50_disp_dptmds_war(device)) + return; + + for (sor = 0; sor < disp->func->sor.nr; sor++) { + u32 clksor = nvkm_rd32(device, 0x614300 + (sor * 0x800)); + switch (clksor & 0x03000000) { + case 0x02000000: + case 0x03000000: + used = true; + break; + default: + break; + } + } + + if (used) + return; + + nvkm_mask(device, 0x00e840, 0x80000000, 0x00000000); +} + static void nv50_disp_intr_unk10_0(struct nv50_disp *disp, int head) { @@ -683,6 +807,8 @@ nv50_disp_intr_unk20_2(struct nv50_disp *disp, int head) nvkm_mask(device, hreg, 0x0000000f, hval); nvkm_mask(device, oreg, mask, oval); + + nv50_disp_dptmds_war_2(disp, &outp->info); } /* If programming a TMDS output on a SOR that can also be configured for @@ -724,6 +850,7 @@ nv50_disp_intr_unk40_0(struct nv50_disp *disp, int head) if (outp->info.location == 0 && outp->info.type == DCB_OUTPUT_TMDS) nv50_disp_intr_unk40_0_tmds(disp, &outp->info); + nv50_disp_dptmds_war_3(disp, &outp->info); } void @@ -771,6 +898,7 @@ nv50_disp_intr_supervisor(struct work_struct *work) continue; nv50_disp_intr_unk40_0(disp, head); } + nv50_disp_update_sppll1(disp); } nvkm_wr32(device, 0x610030, 0x80000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h index aecebd8717e5..1e1de6bfe85a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/nv50.h @@ -68,6 +68,7 @@ struct nv50_disp_func_outp { struct nv50_disp_func { void (*intr)(struct nv50_disp *); + void (*intr_error)(struct nv50_disp *, int chid); const struct nvkm_event_func *uevent; void (*super)(struct work_struct *); @@ -114,4 +115,5 @@ void gf119_disp_vblank_init(struct nv50_disp *, int); void gf119_disp_vblank_fini(struct nv50_disp *, int); void gf119_disp_intr(struct nv50_disp *); void gf119_disp_intr_supervisor(struct work_struct *); +void gf119_disp_intr_error(struct nv50_disp *, int); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h index e9067ba4e179..4e983f6d7032 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/outpdp.h @@ -62,7 +62,12 @@ int g94_sor_dp_lnk_pwr(struct nvkm_output_dp *, int); int gf119_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, struct nvkm_output **); int gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *, int, int, bool); +int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *, int, int, int, int); -int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, - struct nvkm_output **); +int gm107_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, + struct nvkm_output **); +int gm107_sor_dp_pattern(struct nvkm_output_dp *, int); + +int gm200_sor_dp_new(struct nvkm_disp *, int, struct dcb_output *, + struct nvkm_output **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c index 2e2dc0641ef2..2f0220b39f34 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygk104.c @@ -80,7 +80,7 @@ gk104_disp_ovly_mthd_base = { } }; -static const struct nv50_disp_chan_mthd +const struct nv50_disp_chan_mthd gk104_disp_ovly_chan_mthd = { .name = "Overlay", .addr = 0x001000, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c new file mode 100644 index 000000000000..97e2dd2d908e --- /dev/null +++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/disp/ovlygp104.c @@ -0,0 +1,38 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#include "dmacnv50.h" +#include "rootnv50.h" + +#include <nvif/class.h> + +const struct nv50_disp_dmac_oclass +gp104_disp_ovly_oclass = { + .base.oclass = GK104_DISP_OVERLAY_CONTROL_DMA, + .base.minver = 0, + .base.maxver = 0, + .ctor = nv50_disp_ovly_new, + .func = &gp104_disp_dmac_func, + .mthd = &gk104_disp_ovly_chan_mthd, + .chid = 5, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c new file mode 100644 index 000000000000..ac8fdd728ec6 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp100.c @@ -0,0 +1,58 @@ +/* + * Copyright 2015 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
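gp104_disp_ovly_oclass above illustrates how little code a new chipset usually needs: the descriptor pairs the one piece that actually changed on GP104 (gp104_disp_dmac_func) with pieces inherited unchanged from GK104 (the overlay method list and the GK104_DISP_OVERLAY_CONTROL_DMA class id), plus the fixed channel id 5. A toy version of that descriptor-table composition; chan_oclass and the field values are illustrative, the real class ids live in nvif/class.h:

#include <stdio.h>

/* A stripped-down "oclass": identity plus borrowed implementations. */
struct chan_oclass {
	unsigned    handle;  /* class id exposed to clients (illustrative) */
	const char *func;    /* which generation's channel code to use */
	const char *mthd;    /* which generation's method list to use */
	int         chid;
};

/* GP104 overlay: new dmac code, everything else reused from GK104. */
static const struct chan_oclass gp104_ovly = {
	.handle = 0x917e,
	.func   = "gp104_disp_dmac_func",
	.mthd   = "gk104_disp_ovly_chan_mthd",
	.chid   = 5,
};

int main(void)
{
	printf("class %04x: func=%s mthd=%s chid=%d\n", gp104_ovly.handle,
	       gp104_ovly.func, gp104_ovly.mthd, gp104_ovly.chid);
	return 0;
}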
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "rootnv50.h" +#include "dmacnv50.h" + +#include <nvif/class.h> + +static const struct nv50_disp_root_func +gp100_disp_root = { + .init = gf119_disp_root_init, + .fini = gf119_disp_root_fini, + .dmac = { + &gp100_disp_core_oclass, + &gk110_disp_base_oclass, + &gk104_disp_ovly_oclass, + }, + .pioc = { + &gk104_disp_oimm_oclass, + &gk104_disp_curs_oclass, + }, +}; + +static int +gp100_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass, + void *data, u32 size, struct nvkm_object **pobject) +{ + return nv50_disp_root_new_(&gp100_disp_root, disp, oclass, + data, size, pobject); +} + +const struct nvkm_disp_oclass +gp100_disp_root_oclass = { + .base.oclass = GP100_DISP, + .base.minver = -1, + .base.maxver = -1, + .ctor = gp100_disp_root_new, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c new file mode 100644 index 000000000000..8443e04dc626 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootgp104.c @@ -0,0 +1,58 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "rootnv50.h" +#include "dmacnv50.h" + +#include <nvif/class.h> + +static const struct nv50_disp_root_func +gp104_disp_root = { + .init = gf119_disp_root_init, + .fini = gf119_disp_root_fini, + .dmac = { + &gp104_disp_core_oclass, + &gp104_disp_base_oclass, + &gp104_disp_ovly_oclass, + }, + .pioc = { + &gk104_disp_oimm_oclass, + &gk104_disp_curs_oclass, + }, +}; + +static int +gp104_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass, + void *data, u32 size, struct nvkm_object **pobject) +{ + return nv50_disp_root_new_(&gp104_disp_root, disp, oclass, + data, size, pobject); +} + +const struct nvkm_disp_oclass +gp104_disp_root_oclass = { + .base.oclass = GP104_DISP, + .base.minver = -1, + .base.maxver = -1, + .ctor = gp104_disp_root_new, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h index cb449ed8d92c..ad00f1724b72 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/rootnv50.h @@ -40,4 +40,6 @@ extern const struct nvkm_disp_oclass gk104_disp_root_oclass; extern const struct nvkm_disp_oclass gk110_disp_root_oclass; extern const struct nvkm_disp_oclass gm107_disp_root_oclass; extern const struct nvkm_disp_oclass gm200_disp_root_oclass; +extern const struct nvkm_disp_oclass gp100_disp_root_oclass; +extern const struct nvkm_disp_oclass gp104_disp_root_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c index b4b41b135643..22706c0a54b5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgf119.c @@ -40,8 +40,7 @@ static int gf119_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) { struct nvkm_device *device = outp->base.disp->engine.subdev.device; - const u32 loff = gf119_sor_loff(outp); - nvkm_mask(device, 0x61c110 + loff, 0x0f0f0f0f, 0x01010101 * pattern); + nvkm_mask(device, 0x61c110, 0x0f0f0f0f, 0x01010101 * pattern); return 0; } @@ -64,7 +63,7 @@ gf119_sor_dp_lnk_ctl(struct nvkm_output_dp *outp, int nr, int bw, bool ef) return 0; } -static int +int gf119_sor_dp_drv_ctl(struct nvkm_output_dp *outp, int ln, int vs, int pe, int pc) { diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c new file mode 100644 index 000000000000..37790b2617c5 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm107.c @@ -0,0 +1,53 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "nv50.h" +#include "outpdp.h" + +int +gm107_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) +{ + struct nvkm_device *device = outp->base.disp->engine.subdev.device; + const u32 soff = outp->base.or * 0x800; + const u32 data = 0x01010101 * pattern; + if (outp->base.info.sorconf.link & 1) + nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); + else + nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); + return 0; +} + +static const struct nvkm_output_dp_func +gm107_sor_dp_func = { + .pattern = gm107_sor_dp_pattern, + .lnk_pwr = g94_sor_dp_lnk_pwr, + .lnk_ctl = gf119_sor_dp_lnk_ctl, + .drv_ctl = gf119_sor_dp_drv_ctl, +}; + +int +gm107_sor_dp_new(struct nvkm_disp *disp, int index, + struct dcb_output *dcbE, struct nvkm_output **poutp) +{ + return nvkm_output_dp_new_(&gm107_sor_dp_func, disp, index, dcbE, poutp); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c index 2cfbef9c344f..c44fa7ea672a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c @@ -57,19 +57,6 @@ gm200_sor_dp_lane_map(struct nvkm_device *device, u8 lane) } static int -gm200_sor_dp_pattern(struct nvkm_output_dp *outp, int pattern) -{ - struct nvkm_device *device = outp->base.disp->engine.subdev.device; - const u32 soff = gm200_sor_soff(outp); - const u32 data = 0x01010101 * pattern; - if (outp->base.info.sorconf.link & 1) - nvkm_mask(device, 0x61c110 + soff, 0x0f0f0f0f, data); - else - nvkm_mask(device, 0x61c12c + soff, 0x0f0f0f0f, data); - return 0; -} - -static int gm200_sor_dp_lnk_pwr(struct nvkm_output_dp *outp, int nr) { struct nvkm_device *device = outp->base.disp->engine.subdev.device; @@ -129,7 +116,7 @@ gm200_sor_dp_drv_ctl(struct nvkm_output_dp *outp, static const struct nvkm_output_dp_func gm200_sor_dp_func = { - .pattern = gm200_sor_dp_pattern, + .pattern = gm107_sor_dp_pattern, .lnk_pwr = gm200_sor_dp_lnk_pwr, .lnk_ctl = gf119_sor_dp_lnk_ctl, .drv_ctl = gm200_sor_dp_drv_ctl, diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild index 65e5d291ecda..98651a43bc12 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/Kbuild @@ -13,6 +13,7 @@ nvkm-y += nvkm/engine/fifo/gk20a.o nvkm-y += nvkm/engine/fifo/gm107.o nvkm-y += nvkm/engine/fifo/gm200.o nvkm-y += nvkm/engine/fifo/gm20b.o +nvkm-y += nvkm/engine/fifo/gp100.o nvkm-y += nvkm/engine/fifo/chan.o nvkm-y += nvkm/engine/fifo/channv50.o @@ -31,3 +32,4 @@ nvkm-y += nvkm/engine/fifo/gpfifogf100.o nvkm-y += nvkm/engine/fifo/gpfifogk104.o nvkm-y += nvkm/engine/fifo/gpfifogk110.o nvkm-y += nvkm/engine/fifo/gpfifogm200.o +nvkm-y += nvkm/engine/fifo/gpfifogp100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h index e06f4d46f802..230f64e5f731 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/changk104.h @@ -27,4 +27,5 @@ int gk104_fifo_gpfifo_new(struct nvkm_fifo *, const struct nvkm_oclass *, extern const struct nvkm_fifo_chan_oclass 
gk104_fifo_gpfifo_oclass; extern const struct nvkm_fifo_chan_oclass gk110_fifo_gpfifo_oclass; extern const struct nvkm_fifo_chan_oclass gm200_fifo_gpfifo_oclass; +extern const struct nvkm_fifo_chan_oclass gp100_fifo_gpfifo_oclass; #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c index 743f3a189f28..103c0afaaa6d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gk104.c @@ -329,7 +329,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit) } if (eu == NULL) { - enum nvkm_devidx engidx = nvkm_top_fault(device->top, unit); + enum nvkm_devidx engidx = nvkm_top_fault(device, unit); if (engidx < NVKM_SUBDEV_NR) { const char *src = nvkm_subdev_name[engidx]; char *dst = en; @@ -589,7 +589,6 @@ gk104_fifo_oneinit(struct nvkm_fifo *base) struct gk104_fifo *fifo = gk104_fifo(base); struct nvkm_subdev *subdev = &fifo->base.engine.subdev; struct nvkm_device *device = subdev->device; - struct nvkm_top *top = device->top; int engn, runl, pbid, ret, i, j; enum nvkm_devidx engidx; u32 *map; @@ -608,7 +607,7 @@ gk104_fifo_oneinit(struct nvkm_fifo *base) /* Determine runlist configuration from topology device info. */ i = 0; - while ((int)(engidx = nvkm_top_engine(top, i++, &runl, &engn)) >= 0) { + while ((int)(engidx = nvkm_top_engine(device, i++, &runl, &engn)) >= 0) { /* Determine which PBDMA handles requests for this engine. */ for (j = 0, pbid = -1; j < fifo->pbdma_nr; j++) { if (map[j] & (1 << runl)) { @@ -617,8 +616,8 @@ gk104_fifo_oneinit(struct nvkm_fifo *base) } } - nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d\n", - engn, runl, pbid); + nvkm_debug(subdev, "engine %2d: runlist %2d pbdma %2d (%s)\n", + engn, runl, pbid, nvkm_subdev_name[engidx]); fifo->engine[engn].engine = nvkm_device_engine(device, engidx); fifo->engine[engn].runl = runl; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c new file mode 100644 index 000000000000..eff83f7fb705 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gp100.c @@ -0,0 +1,67 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
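The gk104_fifo_oneinit() hunk above is worth pausing on: for each engine reported by the topology info, it scans the per-PBDMA runlist masks (map[j] in the code) to find which PBDMA unit services that engine's runlist, which is exactly what the improved debug line now prints alongside the subdev name. The scan in isolation, with an invented map[] in place of the hardware values:

#include <stdio.h>

#define PBDMA_NR 3

int main(void)
{
	/* map[j] = bitmask of runlists serviced by PBDMA j (made-up values). */
	const unsigned map[PBDMA_NR] = { 0x01, 0x06, 0x38 };
	int runl, j;

	for (runl = 0; runl < 6; runl++) {
		int pbid = -1;
		for (j = 0; j < PBDMA_NR; j++) {
			if (map[j] & (1u << runl)) {
				pbid = j;
				break;
			}
		}
		printf("runlist %2d -> pbdma %2d\n", runl, pbid);
	}
	return 0;
}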
+ * + * Authors: Ben Skeggs + */ +#include "gk104.h" +#include "changk104.h" + +static const struct nvkm_enum +gp100_fifo_fault_engine[] = { + { 0x01, "DISPLAY" }, + { 0x03, "IFB", NULL, NVKM_ENGINE_IFB }, + { 0x04, "BAR1", NULL, NVKM_SUBDEV_BAR }, + { 0x05, "BAR2", NULL, NVKM_SUBDEV_INSTMEM }, + { 0x06, "HOST0" }, + { 0x07, "HOST1" }, + { 0x08, "HOST2" }, + { 0x09, "HOST3" }, + { 0x0a, "HOST4" }, + { 0x0b, "HOST5" }, + { 0x0c, "HOST6" }, + { 0x0d, "HOST7" }, + { 0x0e, "HOST8" }, + { 0x0f, "HOST9" }, + { 0x10, "HOST10" }, + { 0x13, "PERF" }, + { 0x17, "PMU" }, + { 0x18, "PTP" }, + { 0x1f, "PHYSICAL" }, + {} +}; + +static const struct gk104_fifo_func +gp100_fifo = { + .fault.engine = gp100_fifo_fault_engine, + .fault.reason = gk104_fifo_fault_reason, + .fault.hubclient = gk104_fifo_fault_hubclient, + .fault.gpcclient = gk104_fifo_fault_gpcclient, + .chan = { + &gp100_fifo_gpfifo_oclass, + NULL + }, +}; + +int +gp100_fifo_new(struct nvkm_device *device, int index, struct nvkm_fifo **pfifo) +{ + return gk104_fifo_new_(&gp100_fifo, device, index, 4096, pfifo); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c new file mode 100644 index 000000000000..1530a9217aea --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/fifo/gpfifogp100.c @@ -0,0 +1,34 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
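gp100_fifo_fault_engine above is an nvkm_enum table: a sentinel-terminated list mapping raw fault-unit ids to printable names (and, for some entries, an associated engine or subdev id). The lookup itself is a linear scan; a self-contained miniature with a trimmed table, an invented struct name, and an UNKNOWN fallback:

#include <stdio.h>

struct fault_name {
	unsigned value;
	const char *name;
};

/* Subset of the gp100 fault-engine ids from the hunk above. */
static const struct fault_name fault_engine[] = {
	{ 0x01, "DISPLAY" },
	{ 0x04, "BAR1" },
	{ 0x06, "HOST0" },
	{ 0x1f, "PHYSICAL" },
	{ 0x00, NULL }       /* sentinel, like the trailing {} in nvkm */
};

static const char *fault_lookup(unsigned value)
{
	const struct fault_name *en;
	for (en = fault_engine; en->name; en++) {
		if (en->value == value)
			return en->name;
	}
	return "UNKNOWN";
}

int main(void)
{
	printf("fault unit 0x06: %s\n", fault_lookup(0x06));
	printf("fault unit 0x02: %s\n", fault_lookup(0x02));
	return 0;
}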
+ * + * Authors: Ben Skeggs + */ +#include "changk104.h" + +#include <nvif/class.h> + +const struct nvkm_fifo_chan_oclass +gp100_fifo_gpfifo_oclass = { + .base.oclass = PASCAL_CHANNEL_GPFIFO_A, + .base.minver = 0, + .base.maxver = 0, + .ctor = gk104_fifo_gpfifo_new, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild index 290ed0db8047..f1c494182248 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/Kbuild @@ -31,6 +31,7 @@ nvkm-y += nvkm/engine/gr/gk20a.o nvkm-y += nvkm/engine/gr/gm107.o nvkm-y += nvkm/engine/gr/gm200.o nvkm-y += nvkm/engine/gr/gm20b.o +nvkm-y += nvkm/engine/gr/gp100.o nvkm-y += nvkm/engine/gr/ctxnv40.o nvkm-y += nvkm/engine/gr/ctxnv50.o @@ -48,3 +49,4 @@ nvkm-y += nvkm/engine/gr/ctxgk20a.o nvkm-y += nvkm/engine/gr/ctxgm107.o nvkm-y += nvkm/engine/gr/ctxgm200.o nvkm-y += nvkm/engine/gr/ctxgm20b.o +nvkm-y += nvkm/engine/gr/ctxgp100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c index b02d8f50ea6a..bc77eea351a5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.c @@ -1240,7 +1240,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) const struct gf100_grctx_func *grctx = gr->func->grctx; u32 idle_timeout; - nvkm_mc_unk260(device->mc, 0); + nvkm_mc_unk260(device, 0); gf100_gr_mmio(gr, grctx->hub); gf100_gr_mmio(gr, grctx->gpc); @@ -1264,7 +1264,7 @@ gf100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) gf100_gr_icmd(gr, grctx->icmd); nvkm_wr32(device, 0x404154, idle_timeout); gf100_gr_mthd(gr, grctx->mthd); - nvkm_mc_unk260(device->mc, 1); + nvkm_mc_unk260(device, 1); } int diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h index ac895edce164..52048b5a5274 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h @@ -101,6 +101,8 @@ void gm200_grctx_generate_405b60(struct gf100_gr *); extern const struct gf100_grctx_func gm20b_grctx; +extern const struct gf100_grctx_func gp100_grctx; + /* context init value lists */ extern const struct gf100_gr_pack gf100_grctx_pack_icmd[]; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c index f521de11a299..c925ade5880e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf117.c @@ -226,7 +226,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) u32 idle_timeout; int i; - nvkm_mc_unk260(device->mc, 0); + nvkm_mc_unk260(device, 0); gf100_gr_mmio(gr, grctx->hub); gf100_gr_mmio(gr, grctx->gpc); @@ -253,7 +253,7 @@ gf117_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) gf100_gr_icmd(gr, grctx->icmd); nvkm_wr32(device, 0x404154, idle_timeout); gf100_gr_mthd(gr, grctx->mthd); - nvkm_mc_unk260(device->mc, 1); + nvkm_mc_unk260(device, 1); } const struct gf100_grctx_func diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c index 9ba337778ef5..c46b3fdf7203 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c @@ -950,7 +950,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) u32 idle_timeout; int i; - nvkm_mc_unk260(device->mc, 
0); + nvkm_mc_unk260(device, 0); gf100_gr_mmio(gr, grctx->hub); gf100_gr_mmio(gr, grctx->gpc); @@ -979,7 +979,7 @@ gk104_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) gf100_gr_icmd(gr, grctx->icmd); nvkm_wr32(device, 0x404154, idle_timeout); gf100_gr_mthd(gr, grctx->mthd); - nvkm_mc_unk260(device->mc, 1); + nvkm_mc_unk260(device, 1); nvkm_mask(device, 0x418800, 0x00200000, 0x00200000); nvkm_mask(device, 0x41be10, 0x00800000, 0x00800000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c new file mode 100644 index 000000000000..3d1ae7ddf7dd --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgp100.c @@ -0,0 +1,179 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
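The repeated nvkm_mc_unk260() hunks above are one mechanical change: the helper now takes the device rather than device->mc, presumably so it can look up the MC subdev itself and degrade to a no-op when it is absent; that rationale is an inference from the call sites, not stated in the diff. A sketch of that call-site hardening, with invented types:

#include <stdio.h>

struct mc { int dummy; };
struct device { struct mc *mc; };

/* New style: pass the device; the helper finds (or misses) the subdev. */
static void mc_unk260(struct device *device, int enable)
{
	if (!device->mc)
		return;              /* subdev missing: quietly do nothing */
	printf("unk260 <- %d\n", enable);
}

int main(void)
{
	struct mc mc;
	struct device with = { &mc }, without = { 0 };

	mc_unk260(&with, 0);         /* bracket the register programming */
	/* ... program context state ... */
	mc_unk260(&with, 1);

	mc_unk260(&without, 0);      /* safe no-op */
	return 0;
}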
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "ctxgf100.h" + +#include <subdev/fb.h> + +/******************************************************************************* + * PGRAPH context implementation + ******************************************************************************/ + +static void +gp100_grctx_generate_pagepool(struct gf100_grctx *info) +{ + const struct gf100_grctx_func *grctx = info->gr->func->grctx; + const u32 access = NV_MEM_ACCESS_RW | NV_MEM_ACCESS_SYS; + const int s = 8; + const int b = mmio_vram(info, grctx->pagepool_size, (1 << s), access); + mmio_refn(info, 0x40800c, 0x00000000, s, b); + mmio_wr32(info, 0x408010, 0x80000000); + mmio_refn(info, 0x419004, 0x00000000, s, b); + mmio_wr32(info, 0x419008, 0x00000000); +} + +static void +gp100_grctx_generate_attrib(struct gf100_grctx *info) +{ + struct gf100_gr *gr = info->gr; + const struct gf100_grctx_func *grctx = gr->func->grctx; + const u32 alpha = grctx->alpha_nr; + const u32 attrib = grctx->attrib_nr; + const u32 pertpc = 0x20 * (grctx->attrib_nr_max + grctx->alpha_nr_max); + const u32 size = roundup(gr->tpc_total * pertpc, 0x80); + const u32 access = NV_MEM_ACCESS_RW; + const int s = 12; + const int b = mmio_vram(info, size, (1 << s), access); + const int max_batches = 0xffff; + u32 ao = 0; + u32 bo = ao + grctx->alpha_nr_max * gr->tpc_total; + int gpc, ppc, n = 0; + + mmio_refn(info, 0x418810, 0x80000000, s, b); + mmio_refn(info, 0x419848, 0x10000000, s, b); + mmio_refn(info, 0x419c2c, 0x10000000, s, b); + mmio_refn(info, 0x419b00, 0x00000000, s, b); + mmio_wr32(info, 0x419b04, 0x80000000 | size >> 7); + mmio_wr32(info, 0x405830, attrib); + mmio_wr32(info, 0x40585c, alpha); + mmio_wr32(info, 0x4064c4, ((alpha / 4) << 16) | max_batches); + + for (gpc = 0; gpc < gr->gpc_nr; gpc++) { + for (ppc = 0; ppc < gr->ppc_nr[gpc]; ppc++, n++) { + const u32 as = alpha * gr->ppc_tpc_nr[gpc][ppc]; + const u32 bs = attrib * gr->ppc_tpc_nr[gpc][ppc]; + const u32 u = 0x418ea0 + (n * 0x04); + const u32 o = PPC_UNIT(gpc, ppc, 0); + if (!(gr->ppc_mask[gpc] & (1 << ppc))) + continue; + mmio_wr32(info, o + 0xc0, bs); + mmio_wr32(info, o + 0xf4, bo); + mmio_wr32(info, o + 0xf0, bs); + bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc]; + mmio_wr32(info, o + 0xe4, as); + mmio_wr32(info, o + 0xf8, ao); + ao += grctx->alpha_nr_max * gr->ppc_tpc_nr[gpc][ppc]; + mmio_wr32(info, u, bs); + } + } + + mmio_wr32(info, 0x418eec, 0x00000000); + mmio_wr32(info, 0x41befc, 0x00000000); +} + +static void +gp100_grctx_generate_405b60(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4); + u32 dist[TPC_MAX / 4] = {}; + u32 gpcs[GPC_MAX * 2] = {}; + u8 tpcnr[GPC_MAX]; + int tpc, gpc, i; + + memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); + + /* won't result in the same distribution as the binary driver where + * some of the gpcs have more tpcs than others, but this shall do + * for the moment. the code for earlier gpus has this issue too. 
+ */ + for (gpc = -1, i = 0; i < gr->tpc_total; i++) { + do { + gpc = (gpc + 1) % gr->gpc_nr; + } while(!tpcnr[gpc]); + tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--; + + dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8); + gpcs[gpc + (gr->gpc_nr * (tpc / 4))] |= i << (tpc * 8); + } + + for (i = 0; i < dist_nr; i++) + nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]); + for (i = 0; i < gr->gpc_nr * 2; i++) + nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]); +} + +static void +gp100_grctx_generate_main(struct gf100_gr *gr, struct gf100_grctx *info) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + const struct gf100_grctx_func *grctx = gr->func->grctx; + u32 idle_timeout, tmp; + int i; + + gf100_gr_mmio(gr, gr->fuc_sw_ctx); + + idle_timeout = nvkm_mask(device, 0x404154, 0xffffffff, 0x00000000); + + grctx->pagepool(info); + grctx->bundle(info); + grctx->attrib(info); + grctx->unkn(gr); + + gm200_grctx_generate_tpcid(gr); + gf100_grctx_generate_r406028(gr); + gk104_grctx_generate_r418bb8(gr); + + for (i = 0; i < 8; i++) + nvkm_wr32(device, 0x4064d0 + (i * 0x04), 0x00000000); + nvkm_wr32(device, 0x406500, 0x00000000); + + nvkm_wr32(device, 0x405b00, (gr->tpc_total << 8) | gr->gpc_nr); + + for (tmp = 0, i = 0; i < gr->gpc_nr; i++) + tmp |= ((1 << gr->tpc_nr[i]) - 1) << (i * 5); + nvkm_wr32(device, 0x4041c4, tmp); + + gp100_grctx_generate_405b60(gr); + + gf100_gr_icmd(gr, gr->fuc_bundle); + nvkm_wr32(device, 0x404154, idle_timeout); + gf100_gr_mthd(gr, gr->fuc_method); +} + +const struct gf100_grctx_func +gp100_grctx = { + .main = gp100_grctx_generate_main, + .unkn = gk104_grctx_generate_unkn, + .bundle = gm107_grctx_generate_bundle, + .bundle_size = 0x3000, + .bundle_min_gpm_fifo_depth = 0x180, + .bundle_token_limit = 0x1080, + .pagepool = gp100_grctx_generate_pagepool, + .pagepool_size = 0x20000, + .attrib = gp100_grctx_generate_attrib, + .attrib_nr_max = 0x660, + .attrib_nr = 0x440, + .alpha_nr_max = 0xc00, + .alpha_nr = 0x800, +}; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c index 9513badb8220..157919c788e6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c @@ -949,22 +949,41 @@ gf100_gr_trap_gpc_rop(struct gf100_gr *gr, int gpc) } static const struct nvkm_enum gf100_mp_warp_error[] = { - { 0x00, "NO_ERROR" }, - { 0x01, "STACK_MISMATCH" }, + { 0x01, "STACK_ERROR" }, + { 0x02, "API_STACK_ERROR" }, + { 0x03, "RET_EMPTY_STACK_ERROR" }, + { 0x04, "PC_WRAP" }, { 0x05, "MISALIGNED_PC" }, - { 0x08, "MISALIGNED_GPR" }, - { 0x09, "INVALID_OPCODE" }, - { 0x0d, "GPR_OUT_OF_BOUNDS" }, - { 0x0e, "MEM_OUT_OF_BOUNDS" }, - { 0x0f, "UNALIGNED_MEM_ACCESS" }, + { 0x06, "PC_OVERFLOW" }, + { 0x07, "MISALIGNED_IMMC_ADDR" }, + { 0x08, "MISALIGNED_REG" }, + { 0x09, "ILLEGAL_INSTR_ENCODING" }, + { 0x0a, "ILLEGAL_SPH_INSTR_COMBO" }, + { 0x0b, "ILLEGAL_INSTR_PARAM" }, + { 0x0c, "INVALID_CONST_ADDR" }, + { 0x0d, "OOR_REG" }, + { 0x0e, "OOR_ADDR" }, + { 0x0f, "MISALIGNED_ADDR" }, { 0x10, "INVALID_ADDR_SPACE" }, - { 0x11, "INVALID_PARAM" }, + { 0x11, "ILLEGAL_INSTR_PARAM2" }, + { 0x12, "INVALID_CONST_ADDR_LDC" }, + { 0x13, "GEOMETRY_SM_ERROR" }, + { 0x14, "DIVERGENT" }, + { 0x15, "WARP_EXIT" }, {} }; static const struct nvkm_bitfield gf100_mp_global_error[] = { + { 0x00000001, "SM_TO_SM_FAULT" }, + { 0x00000002, "L1_ERROR" }, { 0x00000004, "MULTIPLE_WARP_ERRORS" }, - { 0x00000008, "OUT_OF_STACK_SPACE" }, + { 0x00000008, "PHYSICAL_STACK_OVERFLOW" }, + { 0x00000010, "BPT_INT" }, + { 0x00000020, 
"BPT_PAUSE" }, + { 0x00000040, "SINGLE_STEP_COMPLETE" }, + { 0x20000000, "ECC_SEC_ERROR" }, + { 0x40000000, "ECC_DED_ERROR" }, + { 0x80000000, "TIMEOUT" }, {} }; @@ -1438,24 +1457,30 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr) struct nvkm_device *device = subdev->device; struct nvkm_secboot *sb = device->secboot; int i; + int ret = 0; if (gr->firmware) { /* load fuc microcode */ - nvkm_mc_unk260(device->mc, 0); + nvkm_mc_unk260(device, 0); /* securely-managed falcons must be reset using secure boot */ if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS)) - nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS); + ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS); else gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c, &gr->fuc409d); + if (ret) + return ret; + if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS)) - nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS); + ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS); else gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac, &gr->fuc41ad); + if (ret) + return ret; - nvkm_mc_unk260(device->mc, 1); + nvkm_mc_unk260(device, 1); /* start both of them running */ nvkm_wr32(device, 0x409840, 0xffffffff); @@ -1557,7 +1582,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr) } /* load HUB microcode */ - nvkm_mc_unk260(device->mc, 0); + nvkm_mc_unk260(device, 0); nvkm_wr32(device, 0x4091c0, 0x01000000); for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++) nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]); @@ -1580,7 +1605,7 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr) nvkm_wr32(device, 0x41a188, i >> 6); nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]); } - nvkm_mc_unk260(device->mc, 1); + nvkm_mc_unk260(device, 1); /* load register lists */ gf100_gr_init_csdata(gr, grctx->hub, 0x409000, 0x000, 0x000000); diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h index 2b98abdb9270..268b8d60ff73 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h @@ -292,4 +292,6 @@ extern const struct gf100_gr_init gm107_gr_init_l1c_0[]; extern const struct gf100_gr_init gm107_gr_init_wwdx_0[]; extern const struct gf100_gr_init gm107_gr_init_cbm_0[]; void gm107_gr_init_bios(struct gf100_gr *); + +void gm200_gr_init_gpc_mmu(struct gf100_gr *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c index 4ca8ed15191c..de8b806b88fd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gk20a.c @@ -361,6 +361,5 @@ gk20a_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) if (ret) return ret; - return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c index 4dfa4513bb6c..6435f1257572 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gm200.c @@ -38,7 +38,7 @@ gm200_gr_rops(struct gf100_gr *gr) return nvkm_rd32(gr->base.engine.subdev.device, 0x12006c); } -static void +void gm200_gr_init_gpc_mmu(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c new file mode 100644 index 000000000000..26ad79def0ff --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gp100.c @@ -0,0 +1,171 @@ +/* + * Copyright 2016 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "gf100.h" +#include "ctxgf100.h" + +#include <nvif/class.h> + +/******************************************************************************* + * PGRAPH engine/subdev functions + ******************************************************************************/ + +static void +gp100_gr_init_rop_active_fbps(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + /*XXX: otherwise identical to gm200 aside from mask.. do everywhere? */ + const u32 fbp_count = nvkm_rd32(device, 0x12006c) & 0x0000000f; + nvkm_mask(device, 0x408850, 0x0000000f, fbp_count); /* zrop */ + nvkm_mask(device, 0x408958, 0x0000000f, fbp_count); /* crop */ +} + +static int +gp100_gr_init(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + const u32 magicgpc918 = DIV_ROUND_UP(0x00800000, gr->tpc_total); + u32 data[TPC_MAX / 8] = {}; + u8 tpcnr[GPC_MAX]; + int gpc, tpc, rop; + int i; + + gr->func->init_gpc_mmu(gr); + + gf100_gr_mmio(gr, gr->fuc_sw_nonctx); + + nvkm_wr32(device, GPC_UNIT(0, 0x3018), 0x00000001); + + memset(data, 0x00, sizeof(data)); + memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr)); + for (i = 0, gpc = -1; i < gr->tpc_total; i++) { + do { + gpc = (gpc + 1) % gr->gpc_nr; + } while (!tpcnr[gpc]); + tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--; + + data[i / 8] |= tpc << ((i % 8) * 4); + } + + nvkm_wr32(device, GPC_BCAST(0x0980), data[0]); + nvkm_wr32(device, GPC_BCAST(0x0984), data[1]); + nvkm_wr32(device, GPC_BCAST(0x0988), data[2]); + nvkm_wr32(device, GPC_BCAST(0x098c), data[3]); + + for (gpc = 0; gpc < gr->gpc_nr; gpc++) { + nvkm_wr32(device, GPC_UNIT(gpc, 0x0914), + gr->screen_tile_row_offset << 8 | gr->tpc_nr[gpc]); + nvkm_wr32(device, GPC_UNIT(gpc, 0x0910), 0x00040000 | + gr->tpc_total); + nvkm_wr32(device, GPC_UNIT(gpc, 0x0918), magicgpc918); + } + + nvkm_wr32(device, GPC_BCAST(0x3fd4), magicgpc918); + nvkm_wr32(device, GPC_BCAST(0x08ac), nvkm_rd32(device, 0x100800)); + nvkm_wr32(device, GPC_BCAST(0x033c), nvkm_rd32(device, 0x100804)); + + gr->func->init_rop_active_fbps(gr); + + nvkm_wr32(device, 0x400500, 0x00010001); + nvkm_wr32(device, 0x400100, 0xffffffff); + nvkm_wr32(device, 0x40013c, 0xffffffff); + nvkm_wr32(device, 0x400124, 0x00000002); + nvkm_wr32(device, 0x409c24, 0x000f0002); + nvkm_wr32(device, 0x405848, 0xc0000000); + nvkm_mask(device, 0x40584c, 0x00000000, 0x00000001); + nvkm_wr32(device, 
0x404000, 0xc0000000); + nvkm_wr32(device, 0x404600, 0xc0000000); + nvkm_wr32(device, 0x408030, 0xc0000000); + nvkm_wr32(device, 0x404490, 0xc0000000); + nvkm_wr32(device, 0x406018, 0xc0000000); + nvkm_wr32(device, 0x407020, 0x40000000); + nvkm_wr32(device, 0x405840, 0xc0000000); + nvkm_wr32(device, 0x405844, 0x00ffffff); + nvkm_mask(device, 0x419cc0, 0x00000008, 0x00000008); + + nvkm_mask(device, 0x419c9c, 0x00010000, 0x00010000); + nvkm_mask(device, 0x419c9c, 0x00020000, 0x00020000); + + gr->func->init_ppc_exceptions(gr); + + for (gpc = 0; gpc < gr->gpc_nr; gpc++) { + nvkm_wr32(device, GPC_UNIT(gpc, 0x0420), 0xc0000000); + nvkm_wr32(device, GPC_UNIT(gpc, 0x0900), 0xc0000000); + nvkm_wr32(device, GPC_UNIT(gpc, 0x1028), 0xc0000000); + nvkm_wr32(device, GPC_UNIT(gpc, 0x0824), 0xc0000000); + for (tpc = 0; tpc < gr->tpc_nr[gpc]; tpc++) { + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x508), 0xffffffff); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x50c), 0xffffffff); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x224), 0xc0000000); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x48c), 0xc0000000); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x084), 0xc0000000); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x430), 0xc0000000); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x644), 0x00dffffe); + nvkm_wr32(device, TPC_UNIT(gpc, tpc, 0x64c), 0x00000105); + } + nvkm_wr32(device, GPC_UNIT(gpc, 0x2c90), 0xffffffff); + nvkm_wr32(device, GPC_UNIT(gpc, 0x2c94), 0xffffffff); + } + + for (rop = 0; rop < gr->rop_nr; rop++) { + nvkm_wr32(device, ROP_UNIT(rop, 0x144), 0x40000000); + nvkm_wr32(device, ROP_UNIT(rop, 0x070), 0x40000000); + nvkm_wr32(device, ROP_UNIT(rop, 0x204), 0xffffffff); + nvkm_wr32(device, ROP_UNIT(rop, 0x208), 0xffffffff); + } + + nvkm_wr32(device, 0x400108, 0xffffffff); + nvkm_wr32(device, 0x400138, 0xffffffff); + nvkm_wr32(device, 0x400118, 0xffffffff); + nvkm_wr32(device, 0x400130, 0xffffffff); + nvkm_wr32(device, 0x40011c, 0xffffffff); + nvkm_wr32(device, 0x400134, 0xffffffff); + + gf100_gr_zbc_init(gr); + + return gf100_gr_init_ctxctl(gr); +} + +static const struct gf100_gr_func +gp100_gr = { + .init = gp100_gr_init, + .init_gpc_mmu = gm200_gr_init_gpc_mmu, + .init_rop_active_fbps = gp100_gr_init_rop_active_fbps, + .init_ppc_exceptions = gk104_gr_init_ppc_exceptions, + .rops = gm200_gr_rops, + .ppc_nr = 2, + .grctx = &gp100_grctx, + .sclass = { + { -1, -1, FERMI_TWOD_A }, + { -1, -1, KEPLER_INLINE_TO_MEMORY_B }, + { -1, -1, PASCAL_A, &gf100_fermi }, + { -1, -1, PASCAL_COMPUTE_A }, + {} + } +}; + +int +gp100_gr_new(struct nvkm_device *device, int index, struct nvkm_gr **pgr) +{ + return gm200_gr_new_(&gp100_gr, device, index, pgr); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c index e15b9627b07e..f3c30b2a788e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c @@ -26,6 +26,49 @@ #include <subdev/bios.h> #include <subdev/bios/bmp.h> #include <subdev/bios/bit.h> +#include <subdev/bios/image.h> + +static bool +nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size) +{ + u32 p = *addr; + + if (*addr > bios->image0_size && bios->imaged_addr) { + *addr -= bios->image0_size; + *addr += bios->imaged_addr; + } + + if (unlikely(*addr + size >= bios->size)) { + nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr); + return false; + } + + return true; +} + +u8 +nvbios_rd08(struct nvkm_bios *bios, u32 addr) +{ + if (likely(nvbios_addr(bios, &addr, 1))) + return bios->data[addr]; + return 0x00; +} + 
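nvbios_rd08() above (and the rd16/rd32 variants that follow) funnel every VBIOS access through nvbios_addr(), which first relocates offsets beyond image 0 into the type-0xe0 extension image located at probe time, then bounds-checks before touching bios->data, so a bad table pointer logs an OOB error instead of reading past the buffer. A compact userspace model of that remap-then-clamp idea; the struct layout and sample data are invented:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bios {
	const uint8_t *data;
	uint32_t size;
	uint32_t image0_size;  /* length of the first image */
	uint32_t imaged_addr;  /* offset of the 0xe0 extension image */
};

static bool bios_addr(const struct bios *bios, uint32_t *addr, uint8_t size)
{
	if (*addr > bios->image0_size && bios->imaged_addr) {
		*addr -= bios->image0_size;
		*addr += bios->imaged_addr;
	}
	if (*addr + size > bios->size) {
		fprintf(stderr, "OOB %u-byte read at %08x\n", size, *addr);
		return false;
	}
	return true;
}

static uint8_t bios_rd08(const struct bios *bios, uint32_t addr)
{
	if (bios_addr(bios, &addr, 1))
		return bios->data[addr];
	return 0x00;  /* failed reads never fault, they return zero */
}

int main(void)
{
	uint8_t rom[16] = { 0 };
	struct bios bios = { rom, sizeof(rom), 8, 12 };

	rom[13] = 0xaa;                          /* byte 1 of the extension */
	printf("%02x\n", bios_rd08(&bios, 9));   /* 9 -> 12 + (9 - 8) = 13 */
	return 0;
}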
+u16 +nvbios_rd16(struct nvkm_bios *bios, u32 addr) +{ + if (likely(nvbios_addr(bios, &addr, 2))) + return get_unaligned_le16(&bios->data[addr]); + return 0x0000; +} + +u32 +nvbios_rd32(struct nvkm_bios *bios, u32 addr) +{ + if (likely(nvbios_addr(bios, &addr, 4))) + return get_unaligned_le32(&bios->data[addr]); + return 0x00000000; +} u8 nvbios_checksum(const u8 *data, int size) @@ -100,8 +143,9 @@ int nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios) { struct nvkm_bios *bios; + struct nvbios_image image; struct bit_entry bit_i; - int ret; + int ret, idx = 0; if (!(bios = *pbios = kzalloc(sizeof(*bios), GFP_KERNEL))) return -ENOMEM; @@ -111,6 +155,19 @@ nvkm_bios_new(struct nvkm_device *device, int index, struct nvkm_bios **pbios) if (ret) return ret; + /* Some tables have weird pointers that need adjustment before + * they're dereferenced. I'm not entirely sure why... + */ + if (nvbios_image(bios, idx++, &image)) { + bios->image0_size = image.size; + while (nvbios_image(bios, idx++, &image)) { + if (image.type == 0xe0) { + bios->imaged_addr = image.base; + break; + } + } + } + /* detect type of vbios we're dealing with */ bios->bmp_offset = nvbios_findstr(bios->data, bios->size, "\xff\x7f""NV\0", 5); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c index a5e92135cd77..9efb1b48cd54 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/disp.c @@ -141,7 +141,8 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, { u16 data = nvbios_ocfg_entry(bios, outp, idx, ver, hdr, cnt, len); if (data) { - info->match = nvbios_rd16(bios, data + 0x00); + info->proto = nvbios_rd08(bios, data + 0x00); + info->flags = nvbios_rd16(bios, data + 0x01); info->clkcmp[0] = nvbios_rd16(bios, data + 0x02); info->clkcmp[1] = nvbios_rd16(bios, data + 0x04); } @@ -149,12 +150,13 @@ nvbios_ocfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, } u16 -nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u16 type, +nvbios_ocfg_match(struct nvkm_bios *bios, u16 outp, u8 proto, u8 flags, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, struct nvbios_ocfg *info) { u16 data, idx = 0; while ((data = nvbios_ocfg_parse(bios, outp, idx++, ver, hdr, cnt, len, info))) { - if (info->match == type) + if ((info->proto == proto || info->proto == 0xff) && + (info->flags == flags)) break; } return data; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c index 05332476354a..d89e78c4e689 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/dp.c @@ -40,6 +40,7 @@ nvbios_dp_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) case 0x30: case 0x40: case 0x41: + case 0x42: *hdr = nvbios_rd08(bios, data + 0x01); *len = nvbios_rd08(bios, data + 0x02); *cnt = nvbios_rd08(bios, data + 0x03); @@ -70,6 +71,7 @@ nvbios_dpout_entry(struct nvkm_bios *bios, u8 idx, break; case 0x40: case 0x41: + case 0x42: *hdr = nvbios_rd08(bios, data + 0x04); *cnt = 0; *len = 0; @@ -109,6 +111,7 @@ nvbios_dpout_parse(struct nvkm_bios *bios, u8 idx, break; case 0x40: case 0x41: + case 0x42: info->flags = nvbios_rd08(bios, data + 0x04); info->script[0] = nvbios_rd16(bios, data + 0x05); info->script[1] = nvbios_rd16(bios, data + 0x07); @@ -180,6 +183,11 @@ nvbios_dpcfg_parse(struct nvkm_bios *bios, u16 outp, u8 idx, info->pe = nvbios_rd08(bios, data + 0x02); info->tx_pu = nvbios_rd08(bios, data + 0x03); break; + 
case 0x42: + info->dc = nvbios_rd08(bios, data + 0x00); + info->pe = nvbios_rd08(bios, data + 0x01); + info->tx_pu = nvbios_rd08(bios, data + 0x02); + break; default: data = 0x0000; break; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c index 74b14cf09308..1dbff7aeafec 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/image.c @@ -68,11 +68,16 @@ nvbios_imagen(struct nvkm_bios *bios, struct nvbios_image *image) bool nvbios_image(struct nvkm_bios *bios, int idx, struct nvbios_image *image) { + u32 imaged_addr = bios->imaged_addr; memset(image, 0x00, sizeof(*image)); + bios->imaged_addr = 0; do { image->base += image->size; - if (image->last || !nvbios_imagen(bios, image)) + if (image->last || !nvbios_imagen(bios, image)) { + bios->imaged_addr = imaged_addr; return false; + } } while(idx--); + bios->imaged_addr = imaged_addr; return true; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c index 91a7dc56e406..2ca23a9157ab 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pll.c @@ -77,15 +77,17 @@ g84_pll_mapping[] = { {} }; -static u16 +static u32 pll_limits_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) { struct bit_entry bit_C; - u16 data = 0x0000; + u32 data = 0x0000; if (!bit_entry(bios, 'C', &bit_C)) { if (bit_C.version == 1 && bit_C.length >= 10) data = nvbios_rd16(bios, bit_C.offset + 8); + if (bit_C.version == 2 && bit_C.length >= 4) + data = nvbios_rd32(bios, bit_C.offset + 0); if (data) { *ver = nvbios_rd08(bios, data + 0); *hdr = nvbios_rd08(bios, data + 1); @@ -137,12 +139,12 @@ pll_map(struct nvkm_bios *bios) } } -static u16 +static u32 pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len) { struct pll_mapping *map; u8 hdr, cnt; - u16 data; + u32 data; data = pll_limits_table(bios, ver, &hdr, &cnt, len); if (data && *ver >= 0x30) { @@ -160,7 +162,7 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len) map = pll_map(bios); while (map && map->reg) { if (map->reg == reg && *ver >= 0x20) { - u16 addr = (data += hdr); + u32 addr = (data += hdr); *type = map->type; while (cnt--) { if (nvbios_rd32(bios, data) == map->reg) @@ -179,12 +181,12 @@ pll_map_reg(struct nvkm_bios *bios, u32 reg, u32 *type, u8 *ver, u8 *len) return 0x0000; } -static u16 +static u32 pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len) { struct pll_mapping *map; u8 hdr, cnt; - u16 data; + u32 data; data = pll_limits_table(bios, ver, &hdr, &cnt, len); if (data && *ver >= 0x30) { @@ -202,7 +204,7 @@ pll_map_type(struct nvkm_bios *bios, u8 type, u32 *reg, u8 *ver, u8 *len) map = pll_map(bios); while (map && map->reg) { if (map->type == type && *ver >= 0x20) { - u16 addr = (data += hdr); + u32 addr = (data += hdr); *reg = map->reg; while (cnt--) { if (nvbios_rd32(bios, data) == map->reg) @@ -228,7 +230,7 @@ nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info) struct nvkm_device *device = subdev->device; u8 ver, len; u32 reg = type; - u16 data; + u32 data; if (type > PLL_MAX) { reg = type; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c index c268e5afe852..b4a308f3cf7b 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/pmu.c @@ -26,21 +26,6 @@ #include 
<subdev/bios/image.h> #include <subdev/bios/pmu.h> -static u32 -weirdo_pointer(struct nvkm_bios *bios, u32 data) -{ - struct nvbios_image image; - int idx = 0; - if (nvbios_image(bios, idx++, &image)) { - data -= image.size; - while (nvbios_image(bios, idx++, &image)) { - if (image.type == 0xe0) - return image.base + data; - } - } - return 0; -} - u32 nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) { @@ -50,7 +35,7 @@ nvbios_pmuTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) if (!bit_entry(bios, 'p', &bit_p)) { if (bit_p.version == 2 && bit_p.length >= 4) data = nvbios_rd32(bios, bit_p.offset + 0x00); - if ((data = weirdo_pointer(bios, data))) { + if (data) { *ver = nvbios_rd08(bios, data + 0x00); /* maybe? */ *hdr = nvbios_rd08(bios, data + 0x01); *len = nvbios_rd08(bios, data + 0x02); @@ -97,8 +82,7 @@ nvbios_pmuRm(struct nvkm_bios *bios, u8 type, struct nvbios_pmuR *info) u32 data; memset(info, 0x00, sizeof(*info)); while ((data = nvbios_pmuEp(bios, idx++, &ver, &hdr, &pmuE))) { - if ( pmuE.type == type && - (data = weirdo_pointer(bios, pmuE.data))) { + if (pmuE.type == type && (data = pmuE.data)) { info->init_addr_pmu = nvbios_rd32(bios, data + 0x08); info->args_addr_pmu = nvbios_rd32(bios, data + 0x0c); info->boot_addr = data + 0x30; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c index d0ae7454764e..b57c370c725d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/rammap.c @@ -30,11 +30,11 @@ nvbios_rammapTe(struct nvkm_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len, u8 *snr, u8 *ssz) { struct bit_entry bit_P; - u16 rammap = 0x0000; + u32 rammap = 0x0000; if (!bit_entry(bios, 'P', &bit_P)) { if (bit_P.version == 2) - rammap = nvbios_rd16(bios, bit_P.offset + 4); + rammap = nvbios_rd32(bios, bit_P.offset + 4); if (rammap) { *ver = nvbios_rd08(bios, rammap + 0); @@ -61,7 +61,7 @@ nvbios_rammapEe(struct nvkm_bios *bios, int idx, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) { u8 snr, ssz; - u16 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz); + u32 rammap = nvbios_rammapTe(bios, ver, hdr, cnt, len, &snr, &ssz); if (rammap && idx < *cnt) { rammap = rammap + *hdr + (idx * (*len + (snr * ssz))); *hdr = *len; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c index 78c449b417b7..89d5543118cf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gf100.c @@ -99,7 +99,7 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl) { struct nvkm_device *device = clk->base.subdev.device; u32 ssrc = nvkm_rd32(device, dsrc + (doff * 4)); - u32 sctl = nvkm_rd32(device, dctl + (doff * 4)); + u32 sclk, sctl, sdiv = 2; switch (ssrc & 0x00000003) { case 0: @@ -109,13 +109,21 @@ read_div(struct gf100_clk *clk, int doff, u32 dsrc, u32 dctl) case 2: return 100000; case 3: - if (sctl & 0x80000000) { - u32 sclk = read_vco(clk, dsrc + (doff * 4)); - u32 sdiv = (sctl & 0x0000003f) + 2; - return (sclk * 2) / sdiv; + sclk = read_vco(clk, dsrc + (doff * 4)); + + /* Memclk has doff of 0 despite its alt. 
location */ + if (doff <= 2) { + sctl = nvkm_rd32(device, dctl + (doff * 4)); + + if (sctl & 0x80000000) { + if (ssrc & 0x100) + sctl >>= 8; + + sdiv = (sctl & 0x3f) + 2; + } } - return read_vco(clk, dsrc + (doff * 4)); + return (sclk * 2) / sdiv; default: return 0; } @@ -366,11 +374,17 @@ gf100_clk_prog_2(struct gf100_clk *clk, int idx) if (info->coef) { nvkm_wr32(device, addr + 0x04, info->coef); nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001); + + /* Test PLL lock */ + nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000); nvkm_msec(device, 2000, if (nvkm_rd32(device, addr + 0x00) & 0x00020000) break; ); - nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004); + nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010); + + /* Enable sync mode */ + nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004); } } } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c index 975c401bccab..06bc0d2d6ae1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk104.c @@ -393,11 +393,17 @@ gk104_clk_prog_2(struct gk104_clk *clk, int idx) if (info->coef) { nvkm_wr32(device, addr + 0x04, info->coef); nvkm_mask(device, addr + 0x00, 0x00000001, 0x00000001); + + /* Test PLL lock */ + nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000000); nvkm_msec(device, 2000, if (nvkm_rd32(device, addr + 0x00) & 0x00020000) break; ); - nvkm_mask(device, addr + 0x00, 0x00020004, 0x00000004); + nvkm_mask(device, addr + 0x00, 0x00000010, 0x00000010); + + /* Enable sync mode */ + nvkm_mask(device, addr + 0x00, 0x00000004, 0x00000004); } } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c index 5f0ee24e31b8..218893e3e5f9 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.c @@ -28,69 +28,6 @@ #include <core/tegra.h> #include <subdev/timer.h> -#define KHZ (1000) -#define MHZ (KHZ * 1000) - -#define MASK(w) ((1 << w) - 1) - -#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0) -#define GPCPLL_CFG_ENABLE BIT(0) -#define GPCPLL_CFG_IDDQ BIT(1) -#define GPCPLL_CFG_LOCK_DET_OFF BIT(4) -#define GPCPLL_CFG_LOCK BIT(17) - -#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4) -#define GPCPLL_COEFF_M_SHIFT 0 -#define GPCPLL_COEFF_M_WIDTH 8 -#define GPCPLL_COEFF_N_SHIFT 8 -#define GPCPLL_COEFF_N_WIDTH 8 -#define GPCPLL_COEFF_P_SHIFT 16 -#define GPCPLL_COEFF_P_WIDTH 6 - -#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc) -#define GPCPLL_CFG2_SETUP2_SHIFT 16 -#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24 - -#define GPCPLL_CFG3 (SYS_GPCPLL_CFG_BASE + 0x18) -#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16 - -#define GPC_BCASE_GPCPLL_CFG_BASE 0x00132800 -#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c) -#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0 -#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8 -#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16 -#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22 -#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31 - -#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100) -#define SEL_VCO_GPC2CLK_OUT_SHIFT 0 - -#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250) -#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1 -#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31 -#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1 -#define GPC2CLK_OUT_VCODIV_WIDTH 6 -#define GPC2CLK_OUT_VCODIV_SHIFT 8 -#define GPC2CLK_OUT_VCODIV1 0 -#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \ - GPC2CLK_OUT_VCODIV_SHIFT) -#define 
GPC2CLK_OUT_BYPDIV_WIDTH 6 -#define GPC2CLK_OUT_BYPDIV_SHIFT 0 -#define GPC2CLK_OUT_BYPDIV31 0x3c -#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \ - GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\ - | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\ - | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT)) -#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \ - GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \ - | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \ - | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT)) - -#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCASE_GPCPLL_CFG_BASE + 0xa0) -#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24 -#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \ - (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT) - static const u8 _pl_to_div[] = { /* PL: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */ /* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32, @@ -124,7 +61,7 @@ static const struct gk20a_clk_pllg_params gk20a_pllg_params = { .min_pl = 1, .max_pl = 32, }; -static void +void gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll) { struct nvkm_device *device = clk->base.subdev.device; @@ -136,20 +73,33 @@ gk20a_pllg_read_mnp(struct gk20a_clk *clk, struct gk20a_pll *pll) pll->pl = (val >> GPCPLL_COEFF_P_SHIFT) & MASK(GPCPLL_COEFF_P_WIDTH); } -static u32 -gk20a_pllg_calc_rate(struct gk20a_clk *clk) +void +gk20a_pllg_write_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 val; + + val = (pll->m & MASK(GPCPLL_COEFF_M_WIDTH)) << GPCPLL_COEFF_M_SHIFT; + val |= (pll->n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT; + val |= (pll->pl & MASK(GPCPLL_COEFF_P_WIDTH)) << GPCPLL_COEFF_P_SHIFT; + nvkm_wr32(device, GPCPLL_COEFF, val); +} + +u32 +gk20a_pllg_calc_rate(struct gk20a_clk *clk, struct gk20a_pll *pll) { u32 rate; u32 divider; - rate = clk->parent_rate * clk->pll.n; - divider = clk->pll.m * clk->pl_to_div(clk->pll.pl); + rate = clk->parent_rate * pll->n; + divider = pll->m * clk->pl_to_div(pll->pl); return rate / divider / 2; } -static int -gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate) +int +gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate, + struct gk20a_pll *pll) { struct nvkm_subdev *subdev = &clk->base.subdev; u32 target_clk_f, ref_clk_f, target_freq; @@ -163,16 +113,13 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate) target_clk_f = rate * 2 / KHZ; ref_clk_f = clk->parent_rate / KHZ; - max_vco_f = clk->params->max_vco; + target_vco_f = target_clk_f + target_clk_f / 50; + max_vco_f = max(clk->params->max_vco, target_vco_f); min_vco_f = clk->params->min_vco; best_m = clk->params->max_m; best_n = clk->params->min_n; best_pl = clk->params->min_pl; - target_vco_f = target_clk_f + target_clk_f / 50; - if (max_vco_f < target_vco_f) - max_vco_f = target_vco_f; - /* min_pl <= high_pl <= max_pl */ high_pl = (max_vco_f + target_vco_f - 1) / target_vco_f; high_pl = min(high_pl, clk->params->max_pl); @@ -195,9 +142,7 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate) target_vco_f = target_clk_f * clk->pl_to_div(pl); for (m = clk->params->min_m; m <= clk->params->max_m; m++) { - u32 u_f, vco_f; - - u_f = ref_clk_f / m; + u32 u_f = ref_clk_f / m; if (u_f < clk->params->min_u) break; @@ -211,6 +156,8 @@ gk20a_pllg_calc_mnp(struct gk20a_clk *clk, unsigned long rate) break; for (; n <= n2; n++) { + u32 vco_f; + if (n < 
clk->params->min_n) continue; if (n > clk->params->max_n) @@ -247,16 +194,16 @@ found_match: "no best match for target @ %dMHz on gpc_pll", target_clk_f / KHZ); - clk->pll.m = best_m; - clk->pll.n = best_n; - clk->pll.pl = best_pl; + pll->m = best_m; + pll->n = best_n; + pll->pl = best_pl; - target_freq = gk20a_pllg_calc_rate(clk); + target_freq = gk20a_pllg_calc_rate(clk, pll); nvkm_debug(subdev, - "actual target freq %d MHz, M %d, N %d, PL %d(div%d)\n", - target_freq / MHZ, clk->pll.m, clk->pll.n, clk->pll.pl, - clk->pl_to_div(clk->pll.pl)); + "actual target freq %d KHz, M %d, N %d, PL %d(div%d)\n", + target_freq / KHZ, pll->m, pll->n, pll->pl, + clk->pl_to_div(pll->pl)); return 0; } @@ -265,45 +212,36 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n) { struct nvkm_subdev *subdev = &clk->base.subdev; struct nvkm_device *device = subdev->device; - u32 val; - int ramp_timeout; + struct gk20a_pll pll; + int ret = 0; /* get old coefficients */ - val = nvkm_rd32(device, GPCPLL_COEFF); + gk20a_pllg_read_mnp(clk, &pll); /* do nothing if NDIV is the same */ - if (n == ((val >> GPCPLL_COEFF_N_SHIFT) & MASK(GPCPLL_COEFF_N_WIDTH))) + if (n == pll.n) return 0; - /* setup */ - nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT, - 0x2b << GPCPLL_CFG2_PLL_STEPA_SHIFT); - nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT, - 0xb << GPCPLL_CFG3_PLL_STEPB_SHIFT); - /* pll slowdown mode */ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT), BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT)); /* new ndiv ready for ramp */ - val = nvkm_rd32(device, GPCPLL_COEFF); - val &= ~(MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT); - val |= (n & MASK(GPCPLL_COEFF_N_WIDTH)) << GPCPLL_COEFF_N_SHIFT; + pll.n = n; udelay(1); - nvkm_wr32(device, GPCPLL_COEFF, val); + gk20a_pllg_write_mnp(clk, &pll); /* dynamic ramp to new ndiv */ - val = nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN); - val |= 0x1 << GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT; udelay(1); - nvkm_wr32(device, GPCPLL_NDIV_SLOWDOWN, val); + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT)); - for (ramp_timeout = 500; ramp_timeout > 0; ramp_timeout--) { - udelay(1); - val = nvkm_rd32(device, GPC_BCAST_NDIV_SLOWDOWN_DEBUG); - if (val & GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) - break; - } + /* wait for ramping to complete */ + if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG, + GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK, + GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0) + ret = -ETIMEDOUT; /* exit slowdown mode */ nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, @@ -311,21 +249,35 @@ gk20a_pllg_slide(struct gk20a_clk *clk, u32 n) BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0); nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN); - if (ramp_timeout <= 0) { - nvkm_error(subdev, "gpcpll dynamic ramp timeout\n"); - return -ETIMEDOUT; - } - - return 0; + return ret; } -static void +static int gk20a_pllg_enable(struct gk20a_clk *clk) { struct nvkm_device *device = clk->base.subdev.device; + u32 val; nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE); nvkm_rd32(device, GPCPLL_CFG); + + /* enable lock detection */ + val = nvkm_rd32(device, GPCPLL_CFG); + if (val & GPCPLL_CFG_LOCK_DET_OFF) { + val &= ~GPCPLL_CFG_LOCK_DET_OFF; + nvkm_wr32(device, GPCPLL_CFG, val); + } + + /* wait for lock */ + if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK, + 
GPCPLL_CFG_LOCK) < 0) + return -ETIMEDOUT; + + /* switch to VCO mode */ + nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), + BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); + + return 0; } static void @@ -333,117 +285,81 @@ gk20a_pllg_disable(struct gk20a_clk *clk) { struct nvkm_device *device = clk->base.subdev.device; + /* put PLL in bypass before disabling it */ + nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0); + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0); nvkm_rd32(device, GPCPLL_CFG); } static int -_gk20a_pllg_program_mnp(struct gk20a_clk *clk, bool allow_slide) +gk20a_pllg_program_mnp(struct gk20a_clk *clk, const struct gk20a_pll *pll) { struct nvkm_subdev *subdev = &clk->base.subdev; struct nvkm_device *device = subdev->device; - u32 val, cfg; - struct gk20a_pll old_pll; - u32 n_lo; - - /* get old coefficients */ - gk20a_pllg_read_mnp(clk, &old_pll); - - /* do NDIV slide if there is no change in M and PL */ - cfg = nvkm_rd32(device, GPCPLL_CFG); - if (allow_slide && clk->pll.m == old_pll.m && - clk->pll.pl == old_pll.pl && (cfg & GPCPLL_CFG_ENABLE)) { - return gk20a_pllg_slide(clk, clk->pll.n); - } - - /* slide down to NDIV_LO */ - if (allow_slide && (cfg & GPCPLL_CFG_ENABLE)) { - int ret; - - n_lo = DIV_ROUND_UP(old_pll.m * clk->params->min_vco, - clk->parent_rate / KHZ); - ret = gk20a_pllg_slide(clk, n_lo); + struct gk20a_pll cur_pll; + int ret; - if (ret) - return ret; - } + gk20a_pllg_read_mnp(clk, &cur_pll); - /* split FO-to-bypass jump in halfs by setting out divider 1:2 */ + /* split VCO-to-bypass jump in half by setting out divider 1:2 */ nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, - 0x2 << GPC2CLK_OUT_VCODIV_SHIFT); - - /* put PLL in bypass before programming it */ - val = nvkm_rd32(device, SEL_VCO); - val &= ~(BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); + GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT); + /* Intentional 2nd write to assure linear divider operation */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT); + nvkm_rd32(device, GPC2CLK_OUT); udelay(2); - nvkm_wr32(device, SEL_VCO, val); - - /* get out from IDDQ */ - val = nvkm_rd32(device, GPCPLL_CFG); - if (val & GPCPLL_CFG_IDDQ) { - val &= ~GPCPLL_CFG_IDDQ; - nvkm_wr32(device, GPCPLL_CFG, val); - nvkm_rd32(device, GPCPLL_CFG); - udelay(2); - } gk20a_pllg_disable(clk); - nvkm_debug(subdev, "%s: m=%d n=%d pl=%d\n", __func__, - clk->pll.m, clk->pll.n, clk->pll.pl); - - n_lo = DIV_ROUND_UP(clk->pll.m * clk->params->min_vco, - clk->parent_rate / KHZ); - val = clk->pll.m << GPCPLL_COEFF_M_SHIFT; - val |= (allow_slide ? 
n_lo : clk->pll.n) << GPCPLL_COEFF_N_SHIFT; - val |= clk->pll.pl << GPCPLL_COEFF_P_SHIFT; - nvkm_wr32(device, GPCPLL_COEFF, val); + gk20a_pllg_write_mnp(clk, pll); - gk20a_pllg_enable(clk); - - val = nvkm_rd32(device, GPCPLL_CFG); - if (val & GPCPLL_CFG_LOCK_DET_OFF) { - val &= ~GPCPLL_CFG_LOCK_DET_OFF; - nvkm_wr32(device, GPCPLL_CFG, val); - } - - if (nvkm_usec(device, 300, - if (nvkm_rd32(device, GPCPLL_CFG) & GPCPLL_CFG_LOCK) - break; - ) < 0) - return -ETIMEDOUT; - - /* switch to VCO mode */ - nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), - BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); + ret = gk20a_pllg_enable(clk); + if (ret) + return ret; /* restore out divider 1:1 */ - val = nvkm_rd32(device, GPC2CLK_OUT); - if ((val & GPC2CLK_OUT_VCODIV_MASK) != - (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT)) { - val &= ~GPC2CLK_OUT_VCODIV_MASK; - val |= GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT; - udelay(2); - nvkm_wr32(device, GPC2CLK_OUT, val); - /* Intentional 2nd write to assure linear divider operation */ - nvkm_wr32(device, GPC2CLK_OUT, val); - nvkm_rd32(device, GPC2CLK_OUT); - } + udelay(2); + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT); + /* Intentional 2nd write to assure linear divider operation */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT); + nvkm_rd32(device, GPC2CLK_OUT); - /* slide up to new NDIV */ - return allow_slide ? gk20a_pllg_slide(clk, clk->pll.n) : 0; + return 0; } static int -gk20a_pllg_program_mnp(struct gk20a_clk *clk) +gk20a_pllg_program_mnp_slide(struct gk20a_clk *clk, const struct gk20a_pll *pll) { - int err; + struct gk20a_pll cur_pll; + int ret; - err = _gk20a_pllg_program_mnp(clk, true); - if (err) - err = _gk20a_pllg_program_mnp(clk, false); + if (gk20a_pllg_is_enabled(clk)) { + gk20a_pllg_read_mnp(clk, &cur_pll); + + /* just do NDIV slide if there is no change to M and PL */ + if (pll->m == cur_pll.m && pll->pl == cur_pll.pl) + return gk20a_pllg_slide(clk, pll->n); + + /* slide down to current NDIV_LO */ + cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll); + ret = gk20a_pllg_slide(clk, cur_pll.n); + if (ret) + return ret; + } + + /* program MNP with the new clock parameters and new NDIV_LO */ + cur_pll = *pll; + cur_pll.n = gk20a_pllg_n_lo(clk, &cur_pll); + ret = gk20a_pllg_program_mnp(clk, &cur_pll); + if (ret) + return ret; - return err; + /* slide up to new NDIV */ + return gk20a_pllg_slide(clk, pll->n); } static struct nvkm_pstate @@ -546,13 +462,14 @@ gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src) struct gk20a_clk *clk = gk20a_clk(base); struct nvkm_subdev *subdev = &clk->base.subdev; struct nvkm_device *device = subdev->device; + struct gk20a_pll pll; switch (src) { case nv_clk_src_crystal: return device->crystal; case nv_clk_src_gpc: - gk20a_pllg_read_mnp(clk, &clk->pll); - return gk20a_pllg_calc_rate(clk) / GK20A_CLK_GPC_MDIV; + gk20a_pllg_read_mnp(clk, &pll); + return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV; default: nvkm_error(subdev, "invalid clock source %d\n", src); return -EINVAL; @@ -565,15 +482,20 @@ gk20a_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) struct gk20a_clk *clk = gk20a_clk(base); return gk20a_pllg_calc_mnp(clk, cstate->domain[nv_clk_src_gpc] * - GK20A_CLK_GPC_MDIV); + GK20A_CLK_GPC_MDIV, &clk->pll); } int gk20a_clk_prog(struct nvkm_clk *base) { struct gk20a_clk *clk = gk20a_clk(base); + int ret; + + ret = gk20a_pllg_program_mnp_slide(clk, &clk->pll); + if (ret) + ret 
= gk20a_pllg_program_mnp(clk, &clk->pll); - return gk20a_pllg_program_mnp(clk); + return ret; } void @@ -581,29 +503,62 @@ gk20a_clk_tidy(struct nvkm_clk *base) { } +int +gk20a_clk_setup_slide(struct gk20a_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->base.subdev; + struct nvkm_device *device = subdev->device; + u32 step_a, step_b; + + switch (clk->parent_rate) { + case 12000000: + case 12800000: + case 13000000: + step_a = 0x2b; + step_b = 0x0b; + break; + case 19200000: + step_a = 0x12; + step_b = 0x08; + break; + case 38400000: + step_a = 0x04; + step_b = 0x05; + break; + default: + nvkm_error(subdev, "invalid parent clock rate %u KHz", + clk->parent_rate / KHZ); + return -EINVAL; + } + + nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT, + step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT); + nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT, + step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT); + + return 0; +} + void gk20a_clk_fini(struct nvkm_clk *base) { struct nvkm_device *device = base->subdev.device; struct gk20a_clk *clk = gk20a_clk(base); - u32 val; /* slide to VCO min */ - val = nvkm_rd32(device, GPCPLL_CFG); - if (val & GPCPLL_CFG_ENABLE) { + if (gk20a_pllg_is_enabled(clk)) { struct gk20a_pll pll; u32 n_lo; gk20a_pllg_read_mnp(clk, &pll); - n_lo = DIV_ROUND_UP(pll.m * clk->params->min_vco, - clk->parent_rate / KHZ); + n_lo = gk20a_pllg_n_lo(clk, &pll); gk20a_pllg_slide(clk, n_lo); } - /* put PLL in bypass before disabling it */ - nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0); - gk20a_pllg_disable(clk); + + /* set IDDQ */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1); } static int @@ -614,9 +569,18 @@ gk20a_clk_init(struct nvkm_clk *base) struct nvkm_device *device = subdev->device; int ret; + /* get out from IDDQ */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0); + nvkm_rd32(device, GPCPLL_CFG); + udelay(5); + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL); + ret = gk20a_clk_setup_slide(clk); + if (ret) + return ret; + /* Start with lowest frequency */ base->func->calc(base, &base->func->pstates[0].base); ret = base->func->prog(&clk->base); @@ -646,7 +610,7 @@ gk20a_clk = { }; int -_gk20a_clk_ctor(struct nvkm_device *device, int index, +gk20a_clk_ctor(struct nvkm_device *device, int index, const struct nvkm_clk_func *func, const struct gk20a_clk_pllg_params *params, struct gk20a_clk *clk) @@ -685,7 +649,7 @@ gk20a_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) return -ENOMEM; *pclk = &clk->base; - ret = _gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params, + ret = gk20a_clk_ctor(device, index, &gk20a_clk, &gk20a_pllg_params, clk); clk->pl_to_div = pl_to_div; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h index 13c46740197d..0d1450972162 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gk20a.h @@ -24,9 +24,79 @@ #ifndef __NVKM_CLK_GK20A_H__ #define __NVKM_CLK_GK20A_H__ +#define KHZ (1000) +#define MHZ (KHZ * 1000) + +#define MASK(w) ((1 << (w)) - 1) + #define GK20A_CLK_GPC_MDIV 1000 #define SYS_GPCPLL_CFG_BASE 0x00137000 +#define GPCPLL_CFG (SYS_GPCPLL_CFG_BASE + 0) +#define GPCPLL_CFG_ENABLE BIT(0) +#define GPCPLL_CFG_IDDQ BIT(1) +#define GPCPLL_CFG_LOCK_DET_OFF BIT(4) +#define GPCPLL_CFG_LOCK BIT(17) + +#define GPCPLL_CFG2 (SYS_GPCPLL_CFG_BASE + 0xc) +#define GPCPLL_CFG2_SETUP2_SHIFT 16 +#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24 + +#define GPCPLL_CFG3 
(SYS_GPCPLL_CFG_BASE + 0x18) +#define GPCPLL_CFG3_VCO_CTRL_SHIFT 0 +#define GPCPLL_CFG3_VCO_CTRL_WIDTH 9 +#define GPCPLL_CFG3_VCO_CTRL_MASK \ + (MASK(GPCPLL_CFG3_VCO_CTRL_WIDTH) << GPCPLL_CFG3_VCO_CTRL_SHIFT) +#define GPCPLL_CFG3_PLL_STEPB_SHIFT 16 +#define GPCPLL_CFG3_PLL_STEPB_WIDTH 8 + +#define GPCPLL_COEFF (SYS_GPCPLL_CFG_BASE + 4) +#define GPCPLL_COEFF_M_SHIFT 0 +#define GPCPLL_COEFF_M_WIDTH 8 +#define GPCPLL_COEFF_N_SHIFT 8 +#define GPCPLL_COEFF_N_WIDTH 8 +#define GPCPLL_COEFF_N_MASK \ + (MASK(GPCPLL_COEFF_N_WIDTH) << GPCPLL_COEFF_N_SHIFT) +#define GPCPLL_COEFF_P_SHIFT 16 +#define GPCPLL_COEFF_P_WIDTH 6 + +#define GPCPLL_NDIV_SLOWDOWN (SYS_GPCPLL_CFG_BASE + 0x1c) +#define GPCPLL_NDIV_SLOWDOWN_NDIV_LO_SHIFT 0 +#define GPCPLL_NDIV_SLOWDOWN_NDIV_MID_SHIFT 8 +#define GPCPLL_NDIV_SLOWDOWN_STEP_SIZE_LO2MID_SHIFT 16 +#define GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT 22 +#define GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT 31 + +#define GPC_BCAST_GPCPLL_CFG_BASE 0x00132800 +#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG (GPC_BCAST_GPCPLL_CFG_BASE + 0xa0) +#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT 24 +#define GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK \ + (0x1 << GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_SHIFT) + +#define SEL_VCO (SYS_GPCPLL_CFG_BASE + 0x100) +#define SEL_VCO_GPC2CLK_OUT_SHIFT 0 + +#define GPC2CLK_OUT (SYS_GPCPLL_CFG_BASE + 0x250) +#define GPC2CLK_OUT_SDIV14_INDIV4_WIDTH 1 +#define GPC2CLK_OUT_SDIV14_INDIV4_SHIFT 31 +#define GPC2CLK_OUT_SDIV14_INDIV4_MODE 1 +#define GPC2CLK_OUT_VCODIV_WIDTH 6 +#define GPC2CLK_OUT_VCODIV_SHIFT 8 +#define GPC2CLK_OUT_VCODIV1 0 +#define GPC2CLK_OUT_VCODIV2 2 +#define GPC2CLK_OUT_VCODIV_MASK (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << \ + GPC2CLK_OUT_VCODIV_SHIFT) +#define GPC2CLK_OUT_BYPDIV_WIDTH 6 +#define GPC2CLK_OUT_BYPDIV_SHIFT 0 +#define GPC2CLK_OUT_BYPDIV31 0x3c +#define GPC2CLK_OUT_INIT_MASK ((MASK(GPC2CLK_OUT_SDIV14_INDIV4_WIDTH) << \ + GPC2CLK_OUT_SDIV14_INDIV4_SHIFT)\ + | (MASK(GPC2CLK_OUT_VCODIV_WIDTH) << GPC2CLK_OUT_VCODIV_SHIFT)\ + | (MASK(GPC2CLK_OUT_BYPDIV_WIDTH) << GPC2CLK_OUT_BYPDIV_SHIFT)) +#define GPC2CLK_OUT_INIT_VAL ((GPC2CLK_OUT_SDIV14_INDIV4_MODE << \ + GPC2CLK_OUT_SDIV14_INDIV4_SHIFT) \ + | (GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT) \ + | (GPC2CLK_OUT_BYPDIV31 << GPC2CLK_OUT_BYPDIV_SHIFT)) /* All frequencies in Khz */ struct gk20a_clk_pllg_params { @@ -54,7 +124,29 @@ struct gk20a_clk { }; #define gk20a_clk(p) container_of((p), struct gk20a_clk, base) -int _gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *, +u32 gk20a_pllg_calc_rate(struct gk20a_clk *, struct gk20a_pll *); +int gk20a_pllg_calc_mnp(struct gk20a_clk *, unsigned long, struct gk20a_pll *); +void gk20a_pllg_read_mnp(struct gk20a_clk *, struct gk20a_pll *); +void gk20a_pllg_write_mnp(struct gk20a_clk *, const struct gk20a_pll *); + +static inline bool +gk20a_pllg_is_enabled(struct gk20a_clk *clk) +{ + struct nvkm_device *device = clk->base.subdev.device; + u32 val; + + val = nvkm_rd32(device, GPCPLL_CFG); + return val & GPCPLL_CFG_ENABLE; +} + +static inline u32 +gk20a_pllg_n_lo(struct gk20a_clk *clk, struct gk20a_pll *pll) +{ + return DIV_ROUND_UP(pll->m * clk->params->min_vco, + clk->parent_rate / KHZ); +} + +int gk20a_clk_ctor(struct nvkm_device *, int, const struct nvkm_clk_func *, const struct gk20a_clk_pllg_params *, struct gk20a_clk *); void gk20a_clk_fini(struct nvkm_clk *); int gk20a_clk_read(struct nvkm_clk *, enum nv_clk_src); @@ -62,4 +154,6 @@ int gk20a_clk_calc(struct nvkm_clk *, struct 
nvkm_cstate *); int gk20a_clk_prog(struct nvkm_clk *); void gk20a_clk_tidy(struct nvkm_clk *); +int gk20a_clk_setup_slide(struct gk20a_clk *); + #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c index 71b2bbb61973..b284e949f732 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gm20b.c @@ -21,20 +21,123 @@ */ #include <subdev/clk.h> +#include <subdev/volt.h> +#include <subdev/timer.h> #include <core/device.h> +#include <core/tegra.h> #include "priv.h" #include "gk20a.h" -#define KHZ (1000) -#define MHZ (KHZ * 1000) - -#define MASK(w) ((1 << w) - 1) +#define GPCPLL_CFG_SYNC_MODE BIT(2) #define BYPASSCTRL_SYS (SYS_GPCPLL_CFG_BASE + 0x340) #define BYPASSCTRL_SYS_GPCPLL_SHIFT 0 #define BYPASSCTRL_SYS_GPCPLL_WIDTH 1 +#define GPCPLL_CFG2_SDM_DIN_SHIFT 0 +#define GPCPLL_CFG2_SDM_DIN_WIDTH 8 +#define GPCPLL_CFG2_SDM_DIN_MASK \ + (MASK(GPCPLL_CFG2_SDM_DIN_WIDTH) << GPCPLL_CFG2_SDM_DIN_SHIFT) +#define GPCPLL_CFG2_SDM_DIN_NEW_SHIFT 8 +#define GPCPLL_CFG2_SDM_DIN_NEW_WIDTH 15 +#define GPCPLL_CFG2_SDM_DIN_NEW_MASK \ + (MASK(GPCPLL_CFG2_SDM_DIN_NEW_WIDTH) << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT) +#define GPCPLL_CFG2_SETUP2_SHIFT 16 +#define GPCPLL_CFG2_PLL_STEPA_SHIFT 24 + +#define GPCPLL_DVFS0 (SYS_GPCPLL_CFG_BASE + 0x10) +#define GPCPLL_DVFS0_DFS_COEFF_SHIFT 0 +#define GPCPLL_DVFS0_DFS_COEFF_WIDTH 7 +#define GPCPLL_DVFS0_DFS_COEFF_MASK \ + (MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH) << GPCPLL_DVFS0_DFS_COEFF_SHIFT) +#define GPCPLL_DVFS0_DFS_DET_MAX_SHIFT 8 +#define GPCPLL_DVFS0_DFS_DET_MAX_WIDTH 7 +#define GPCPLL_DVFS0_DFS_DET_MAX_MASK \ + (MASK(GPCPLL_DVFS0_DFS_DET_MAX_WIDTH) << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT) + +#define GPCPLL_DVFS1 (SYS_GPCPLL_CFG_BASE + 0x14) +#define GPCPLL_DVFS1_DFS_EXT_DET_SHIFT 0 +#define GPCPLL_DVFS1_DFS_EXT_DET_WIDTH 7 +#define GPCPLL_DVFS1_DFS_EXT_STRB_SHIFT 7 +#define GPCPLL_DVFS1_DFS_EXT_STRB_WIDTH 1 +#define GPCPLL_DVFS1_DFS_EXT_CAL_SHIFT 8 +#define GPCPLL_DVFS1_DFS_EXT_CAL_WIDTH 7 +#define GPCPLL_DVFS1_DFS_EXT_SEL_SHIFT 15 +#define GPCPLL_DVFS1_DFS_EXT_SEL_WIDTH 1 +#define GPCPLL_DVFS1_DFS_CTRL_SHIFT 16 +#define GPCPLL_DVFS1_DFS_CTRL_WIDTH 12 +#define GPCPLL_DVFS1_EN_SDM_SHIFT 28 +#define GPCPLL_DVFS1_EN_SDM_WIDTH 1 +#define GPCPLL_DVFS1_EN_SDM_BIT BIT(28) +#define GPCPLL_DVFS1_EN_DFS_SHIFT 29 +#define GPCPLL_DVFS1_EN_DFS_WIDTH 1 +#define GPCPLL_DVFS1_EN_DFS_BIT BIT(29) +#define GPCPLL_DVFS1_EN_DFS_CAL_SHIFT 30 +#define GPCPLL_DVFS1_EN_DFS_CAL_WIDTH 1 +#define GPCPLL_DVFS1_EN_DFS_CAL_BIT BIT(30) +#define GPCPLL_DVFS1_DFS_CAL_DONE_SHIFT 31 +#define GPCPLL_DVFS1_DFS_CAL_DONE_WIDTH 1 +#define GPCPLL_DVFS1_DFS_CAL_DONE_BIT BIT(31) + +#define GPC_BCAST_GPCPLL_DVFS2 (GPC_BCAST_GPCPLL_CFG_BASE + 0x20) +#define GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT BIT(16) + +#define GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT 24 +#define GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH 7 + +#define DFS_DET_RANGE 6 /* -2^6 ... 2^6-1 */ +#define SDM_DIN_RANGE 12 /* -2^12 ... 2^12-1 */ + +struct gm20b_clk_dvfs_params { + s32 coeff_slope; + s32 coeff_offs; + u32 vco_ctrl; +}; + +static const struct gm20b_clk_dvfs_params gm20b_dvfs_params = { + .coeff_slope = -165230, + .coeff_offs = 214007, + .vco_ctrl = 0x7 << 3, +}; + +/* + * base.n is now the *integer* part of the N factor. + * sdm_din contains n's decimal part. 
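+ * The hardware interpolates between n_int and n_int + 1 through the
+ * sigma-delta modulator input, so the feedback divider can realize
+ * fractional ratios (an assumption-level gloss on the NAPLL behaviour,
+ * consistent with gm20b_dvfs_calc_ndiv() below).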
+ */ +struct gm20b_pll { + struct gk20a_pll base; + u32 sdm_din; +}; + +struct gm20b_clk_dvfs { + u32 dfs_coeff; + s32 dfs_det_max; + s32 dfs_ext_cal; +}; + +struct gm20b_clk { + /* currently applied parameters */ + struct gk20a_clk base; + struct gm20b_clk_dvfs dvfs; + u32 uv; + + /* new parameters to apply */ + struct gk20a_pll new_pll; + struct gm20b_clk_dvfs new_dvfs; + u32 new_uv; + + const struct gm20b_clk_dvfs_params *dvfs_params; + + /* fused parameters */ + s32 uvdet_slope; + s32 uvdet_offs; + + /* safe frequency we can use at minimum voltage */ + u32 safe_fmax_vmin; +}; +#define gm20b_clk(p) container_of((gk20a_clk(p)), struct gm20b_clk, base) + static u32 pl_to_div(u32 pl) { return pl; @@ -53,6 +156,484 @@ static const struct gk20a_clk_pllg_params gm20b_pllg_params = { .min_pl = 1, .max_pl = 31, }; +static void +gm20b_pllg_read_mnp(struct gm20b_clk *clk, struct gm20b_pll *pll) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_device *device = subdev->device; + u32 val; + + gk20a_pllg_read_mnp(&clk->base, &pll->base); + val = nvkm_rd32(device, GPCPLL_CFG2); + pll->sdm_din = (val >> GPCPLL_CFG2_SDM_DIN_SHIFT) & + MASK(GPCPLL_CFG2_SDM_DIN_WIDTH); +} + +static void +gm20b_pllg_write_mnp(struct gm20b_clk *clk, const struct gm20b_pll *pll) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK, + pll->sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT); + gk20a_pllg_write_mnp(&clk->base, &pll->base); +} + +/* + * Determine DFS_COEFF for the requested voltage. Always select external + * calibration override equal to the voltage, and set maximum detection + * limit "0" (to make sure that PLL output remains under F/V curve when + * voltage increases). + */ +static void +gm20b_dvfs_calc_det_coeff(struct gm20b_clk *clk, s32 uv, + struct gm20b_clk_dvfs *dvfs) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + const struct gm20b_clk_dvfs_params *p = clk->dvfs_params; + u32 coeff; + /* Work with mv as uv would likely trigger an overflow */ + s32 mv = DIV_ROUND_CLOSEST(uv, 1000); + + /* coeff = slope * voltage + offset */ + coeff = DIV_ROUND_CLOSEST(mv * p->coeff_slope, 1000) + p->coeff_offs; + coeff = DIV_ROUND_CLOSEST(coeff, 1000); + dvfs->dfs_coeff = min_t(u32, coeff, MASK(GPCPLL_DVFS0_DFS_COEFF_WIDTH)); + + dvfs->dfs_ext_cal = DIV_ROUND_CLOSEST(uv - clk->uvdet_offs, + clk->uvdet_slope); + /* should never happen */ + if (abs(dvfs->dfs_ext_cal) >= BIT(DFS_DET_RANGE)) + nvkm_error(subdev, "dfs_ext_cal overflow!\n"); + + dvfs->dfs_det_max = 0; + + nvkm_debug(subdev, "%s uv: %d coeff: %x, ext_cal: %d, det_max: %d\n", + __func__, uv, dvfs->dfs_coeff, dvfs->dfs_ext_cal, + dvfs->dfs_det_max); +} + +/* + * Solve equation for integer and fractional part of the effective NDIV: + * + * n_eff = n_int + 1/2 + (SDM_DIN / 2^(SDM_DIN_RANGE + 1)) + + * (DVFS_COEFF * DVFS_DET_DELTA) / 2^DFS_DET_RANGE + * + * The SDM_DIN LSB is finally shifted out, since it is not accessible by sw. 
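+ *
+ * Mirroring the fixed-point code below: n is carried in units of
+ * 2^-DFS_DET_RANGE, the integer part is n >> DFS_DET_RANGE, and the
+ * remainder is rescaled into SDM_DIN units (with 2^SDM_DIN_RANGE
+ * subtracted to account for the constant 1/2 term) before the low
+ * byte is dropped.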
+ */ +static void +gm20b_dvfs_calc_ndiv(struct gm20b_clk *clk, u32 n_eff, u32 *n_int, u32 *sdm_din) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + const struct gk20a_clk_pllg_params *p = clk->base.params; + u32 n; + s32 det_delta; + u32 rem, rem_range; + + /* calculate current ext_cal and subtract previous one */ + det_delta = DIV_ROUND_CLOSEST(((s32)clk->uv) - clk->uvdet_offs, + clk->uvdet_slope); + det_delta -= clk->dvfs.dfs_ext_cal; + det_delta = min(det_delta, clk->dvfs.dfs_det_max); + det_delta *= clk->dvfs.dfs_coeff; + + /* integer part of n */ + n = (n_eff << DFS_DET_RANGE) - det_delta; + /* should never happen! */ + if (n <= 0) { + nvkm_error(subdev, "ndiv <= 0 - setting to 1...\n"); + n = 1 << DFS_DET_RANGE; + } + if (n >> DFS_DET_RANGE > p->max_n) { + nvkm_error(subdev, "ndiv > max_n - setting to max_n...\n"); + n = p->max_n << DFS_DET_RANGE; + } + *n_int = n >> DFS_DET_RANGE; + + /* fractional part of n */ + rem = ((u32)n) & MASK(DFS_DET_RANGE); + rem_range = SDM_DIN_RANGE + 1 - DFS_DET_RANGE; + /* subtract 2^SDM_DIN_RANGE to account for the 1/2 of the equation */ + rem = (rem << rem_range) - BIT(SDM_DIN_RANGE); + /* lose 8 LSB and clip - sdm_din only keeps the most significant byte */ + *sdm_din = (rem >> BITS_PER_BYTE) & MASK(GPCPLL_CFG2_SDM_DIN_WIDTH); + + nvkm_debug(subdev, "%s n_eff: %d, n_int: %d, sdm_din: %d\n", __func__, + n_eff, *n_int, *sdm_din); +} + +static int +gm20b_pllg_slide(struct gm20b_clk *clk, u32 n) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_device *device = subdev->device; + struct gm20b_pll pll; + u32 n_int, sdm_din; + int ret = 0; + + /* calculate the new n_int/sdm_din for this n/uv */ + gm20b_dvfs_calc_ndiv(clk, n, &n_int, &sdm_din); + + /* get old coefficients */ + gm20b_pllg_read_mnp(clk, &pll); + /* do nothing if NDIV is the same */ + if (n_int == pll.base.n && sdm_din == pll.sdm_din) + return 0; + + /* pll slowdown mode */ + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT), + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT)); + + /* new ndiv ready for ramp */ + /* in DVFS mode SDM is updated via "new" field */ + nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_NEW_MASK, + sdm_din << GPCPLL_CFG2_SDM_DIN_NEW_SHIFT); + pll.base.n = n_int; + udelay(1); + gk20a_pllg_write_mnp(&clk->base, &pll.base); + + /* dynamic ramp to new ndiv */ + udelay(1); + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT)); + + /* wait for ramping to complete */ + if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG, + GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK, + GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0) + ret = -ETIMEDOUT; + + /* in DVFS mode complete SDM update */ + nvkm_mask(device, GPCPLL_CFG2, GPCPLL_CFG2_SDM_DIN_MASK, + sdm_din << GPCPLL_CFG2_SDM_DIN_SHIFT); + + /* exit slowdown mode */ + nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN, + BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) | + BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0); + nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN); + + return ret; +} + +static int +gm20b_pllg_enable(struct gm20b_clk *clk) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE); + nvkm_rd32(device, GPCPLL_CFG); + + /* In DVFS mode lock cannot be used - so just delay */ + udelay(40); + + /* set SYNC_MODE for glitchless switch out of bypass */ + 
nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, + GPCPLL_CFG_SYNC_MODE); + nvkm_rd32(device, GPCPLL_CFG); + + /* switch to VCO mode */ + nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), + BIT(SEL_VCO_GPC2CLK_OUT_SHIFT)); + + return 0; +} + +static void +gm20b_pllg_disable(struct gm20b_clk *clk) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + /* put PLL in bypass before disabling it */ + nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0); + + /* clear SYNC_MODE before disabling PLL */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_SYNC_MODE, 0); + + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0); + nvkm_rd32(device, GPCPLL_CFG); +} + +static int +gm20b_pllg_program_mnp(struct gm20b_clk *clk, const struct gk20a_pll *pll) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_device *device = subdev->device; + struct gm20b_pll cur_pll; + u32 n_int, sdm_din; + /* if we only change pdiv, we can do a glitchless transition */ + bool pdiv_only; + int ret; + + gm20b_dvfs_calc_ndiv(clk, pll->n, &n_int, &sdm_din); + gm20b_pllg_read_mnp(clk, &cur_pll); + pdiv_only = cur_pll.base.n == n_int && cur_pll.sdm_din == sdm_din && + cur_pll.base.m == pll->m; + + /* need full sequence if clock not enabled yet */ + if (!gk20a_pllg_is_enabled(&clk->base)) + pdiv_only = false; + + /* split VCO-to-bypass jump in half by setting out divider 1:2 */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT); + /* Intentional 2nd write to assure linear divider operation */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV2 << GPC2CLK_OUT_VCODIV_SHIFT); + nvkm_rd32(device, GPC2CLK_OUT); + udelay(2); + + if (pdiv_only) { + u32 old = cur_pll.base.pl; + u32 new = pll->pl; + + /* + * we can do a glitchless transition only if the old and new PL + * parameters share at least one bit set to 1. If this is not + * the case, calculate and program an interim PL that will allow + * us to respect that rule. 
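+ *
+ * For example, old PL 0x4 (0b100) and new PL 0x3 (0b011) share no set
+ * bit, so min(0x4 | BIT(ffs(0x3) - 1), 0x3 | BIT(ffs(0x4) - 1)) =
+ * min(0x5, 0x7) = 0x5 is programmed as an interim divider first.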
+ */ + if ((old & new) == 0) { + cur_pll.base.pl = min(old | BIT(ffs(new) - 1), + new | BIT(ffs(old) - 1)); + gk20a_pllg_write_mnp(&clk->base, &cur_pll.base); + } + + cur_pll.base.pl = new; + gk20a_pllg_write_mnp(&clk->base, &cur_pll.base); + } else { + /* disable before programming if more than pdiv changes */ + gm20b_pllg_disable(clk); + + cur_pll.base = *pll; + cur_pll.base.n = n_int; + cur_pll.sdm_din = sdm_din; + gm20b_pllg_write_mnp(clk, &cur_pll); + + ret = gm20b_pllg_enable(clk); + if (ret) + return ret; + } + + /* restore out divider 1:1 */ + udelay(2); + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT); + /* Intentional 2nd write to assure linear divider operation */ + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_VCODIV_MASK, + GPC2CLK_OUT_VCODIV1 << GPC2CLK_OUT_VCODIV_SHIFT); + nvkm_rd32(device, GPC2CLK_OUT); + + return 0; +} + +static int +gm20b_pllg_program_mnp_slide(struct gm20b_clk *clk, const struct gk20a_pll *pll) +{ + struct gk20a_pll cur_pll; + int ret; + + if (gk20a_pllg_is_enabled(&clk->base)) { + gk20a_pllg_read_mnp(&clk->base, &cur_pll); + + /* just do NDIV slide if there is no change to M and PL */ + if (pll->m == cur_pll.m && pll->pl == cur_pll.pl) + return gm20b_pllg_slide(clk, pll->n); + + /* slide down to current NDIV_LO */ + cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll); + ret = gm20b_pllg_slide(clk, cur_pll.n); + if (ret) + return ret; + } + + /* program MNP with the new clock parameters and new NDIV_LO */ + cur_pll = *pll; + cur_pll.n = gk20a_pllg_n_lo(&clk->base, &cur_pll); + ret = gm20b_pllg_program_mnp(clk, &cur_pll); + if (ret) + return ret; + + /* slide up to new NDIV */ + return gm20b_pllg_slide(clk, pll->n); +} + +static int +gm20b_clk_calc(struct nvkm_clk *base, struct nvkm_cstate *cstate) +{ + struct gm20b_clk *clk = gm20b_clk(base); + struct nvkm_subdev *subdev = &base->subdev; + struct nvkm_volt *volt = base->subdev.device->volt; + int ret; + + ret = gk20a_pllg_calc_mnp(&clk->base, cstate->domain[nv_clk_src_gpc] * + GK20A_CLK_GPC_MDIV, &clk->new_pll); + if (ret) + return ret; + + clk->new_uv = volt->vid[cstate->voltage].uv; + gm20b_dvfs_calc_det_coeff(clk, clk->new_uv, &clk->new_dvfs); + + nvkm_debug(subdev, "%s uv: %d uv\n", __func__, clk->new_uv); + + return 0; +} + +/* + * Compute PLL parameters that are always safe for the current voltage + */ +static void +gm20b_dvfs_calc_safe_pll(struct gm20b_clk *clk, struct gk20a_pll *pll) +{ + u32 rate = gk20a_pllg_calc_rate(&clk->base, pll) / KHZ; + u32 parent_rate = clk->base.parent_rate / KHZ; + u32 nmin, nsafe; + + /* remove a safe margin of 10% */ + if (rate > clk->safe_fmax_vmin) + rate = rate * (100 - 10) / 100; + + /* gpc2clk */ + rate *= 2; + + nmin = DIV_ROUND_UP(pll->m * clk->base.params->min_vco, parent_rate); + nsafe = pll->m * rate / (clk->base.parent_rate); + + if (nsafe < nmin) { + pll->pl = DIV_ROUND_UP(nmin * parent_rate, pll->m * rate); + nsafe = nmin; + } + + pll->n = nsafe; +} + +static void +gm20b_dvfs_program_coeff(struct gm20b_clk *clk, u32 coeff) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + /* strobe to read external DFS coefficient */ + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT); + + nvkm_mask(device, GPCPLL_DVFS0, GPCPLL_DVFS0_DFS_COEFF_MASK, + coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT); + + udelay(1); + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0); +} + +static void 
+gm20b_dvfs_program_ext_cal(struct gm20b_clk *clk, u32 dfs_det_cal) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + u32 val; + + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, MASK(DFS_DET_RANGE + 1), + dfs_det_cal); + udelay(1); + + val = nvkm_rd32(device, GPCPLL_DVFS1); + if (!(val & BIT(25))) { + /* Use external value to overwrite calibration value */ + val |= BIT(25) | BIT(16); + nvkm_wr32(device, GPCPLL_DVFS1, val); + } +} + +static void +gm20b_dvfs_program_dfs_detection(struct gm20b_clk *clk, + struct gm20b_clk_dvfs *dvfs) +{ + struct nvkm_device *device = clk->base.base.subdev.device; + + /* strobe to read external DFS coefficient */ + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT); + + nvkm_mask(device, GPCPLL_DVFS0, + GPCPLL_DVFS0_DFS_COEFF_MASK | GPCPLL_DVFS0_DFS_DET_MAX_MASK, + dvfs->dfs_coeff << GPCPLL_DVFS0_DFS_COEFF_SHIFT | + dvfs->dfs_det_max << GPCPLL_DVFS0_DFS_DET_MAX_SHIFT); + + udelay(1); + nvkm_mask(device, GPC_BCAST_GPCPLL_DVFS2, + GPC_BCAST_GPCPLL_DVFS2_DFS_EXT_STROBE_BIT, 0); + + gm20b_dvfs_program_ext_cal(clk, dvfs->dfs_ext_cal); +} + +static int +gm20b_clk_prog(struct nvkm_clk *base) +{ + struct gm20b_clk *clk = gm20b_clk(base); + u32 cur_freq; + int ret; + + /* No change in DVFS settings? */ + if (clk->uv == clk->new_uv) + goto prog; + + /* + * Interim step for changing DVFS detection settings: low enough + * frequency to be safe at DVFS coeff = 0. + * + * 1. If voltage is increasing: + * - safe frequency target matches the lowest - old - frequency + * - DVFS settings are still old + * - Voltage already increased to new level by volt, but maximum + * detection limit assures PLL output remains under F/V curve + * + * 2. If voltage is decreasing: + * - safe frequency target matches the lowest - new - frequency + * - DVFS settings are still old + * - Voltage is also old, it will be lowered by volt afterwards + * + * Interim step can be skipped if old frequency is below safe minimum, + * i.e., it is low enough to be safe at any voltage in operating range + * with zero DVFS coefficient. 
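+ *
+ * Either way, the DVFS reprogramming below runs while the PLL sits at
+ * a frequency that is safe for both the old and the new voltage; only
+ * afterwards do we slide to the requested NDIV.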
+ */ + cur_freq = nvkm_clk_read(&clk->base.base, nv_clk_src_gpc); + if (cur_freq > clk->safe_fmax_vmin) { + struct gk20a_pll pll_safe; + + if (clk->uv < clk->new_uv) + /* voltage will rise: safe frequency is current one */ + pll_safe = clk->base.pll; + else + /* voltage will drop: safe frequency is new one */ + pll_safe = clk->new_pll; + + gm20b_dvfs_calc_safe_pll(clk, &pll_safe); + ret = gm20b_pllg_program_mnp_slide(clk, &pll_safe); + if (ret) + return ret; + } + + /* + * DVFS detection settings transition: + * - Set DVFS coefficient zero + * - Set calibration level to new voltage + * - Set DVFS coefficient to match new voltage + */ + gm20b_dvfs_program_coeff(clk, 0); + gm20b_dvfs_program_ext_cal(clk, clk->new_dvfs.dfs_ext_cal); + gm20b_dvfs_program_coeff(clk, clk->new_dvfs.dfs_coeff); + gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs); + +prog: + clk->uv = clk->new_uv; + clk->dvfs = clk->new_dvfs; + clk->base.pll = clk->new_pll; + + return gm20b_pllg_program_mnp_slide(clk, &clk->base.pll); +} + static struct nvkm_pstate gm20b_pstates[] = { { @@ -133,9 +714,99 @@ gm20b_pstates[] = { .voltage = 12, }, }, - }; +static void +gm20b_clk_fini(struct nvkm_clk *base) +{ + struct nvkm_device *device = base->subdev.device; + struct gm20b_clk *clk = gm20b_clk(base); + + /* slide to VCO min */ + if (gk20a_pllg_is_enabled(&clk->base)) { + struct gk20a_pll pll; + u32 n_lo; + + gk20a_pllg_read_mnp(&clk->base, &pll); + n_lo = gk20a_pllg_n_lo(&clk->base, &pll); + gm20b_pllg_slide(clk, n_lo); + } + + gm20b_pllg_disable(clk); + + /* set IDDQ */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 1); +} + +static int +gm20b_clk_init_dvfs(struct gm20b_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_device *device = subdev->device; + bool fused = clk->uvdet_offs && clk->uvdet_slope; + static const s32 ADC_SLOPE_UV = 10000; /* default ADC detection slope */ + u32 data; + int ret; + + /* Enable NA DVFS */ + nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_BIT, + GPCPLL_DVFS1_EN_DFS_BIT); + + /* Set VCO_CTRL */ + if (clk->dvfs_params->vco_ctrl) + nvkm_mask(device, GPCPLL_CFG3, GPCPLL_CFG3_VCO_CTRL_MASK, + clk->dvfs_params->vco_ctrl << GPCPLL_CFG3_VCO_CTRL_SHIFT); + + if (fused) { + /* Start internal calibration, but ignore results */ + nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT, + GPCPLL_DVFS1_EN_DFS_CAL_BIT); + + /* got uvdet parameters from fuse, skip calibration */ + goto calibrated; + } + + /* + * If calibration parameters are not fused, start internal calibration, + * wait for completion, and use results along with default slope to + * calculate ADC offset during boot. + */ + nvkm_mask(device, GPCPLL_DVFS1, GPCPLL_DVFS1_EN_DFS_CAL_BIT, + GPCPLL_DVFS1_EN_DFS_CAL_BIT); + + /* Wait for internal calibration done (spec < 2us). 
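Polling for up to 10us below leaves margin over that spec.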
*/ + ret = nvkm_wait_usec(device, 10, GPCPLL_DVFS1, + GPCPLL_DVFS1_DFS_CAL_DONE_BIT, + GPCPLL_DVFS1_DFS_CAL_DONE_BIT); + if (ret < 0) { + nvkm_error(subdev, "GPCPLL calibration timeout\n"); + return -ETIMEDOUT; + } + + data = nvkm_rd32(device, GPCPLL_CFG3) >> + GPCPLL_CFG3_PLL_DFS_TESTOUT_SHIFT; + data &= MASK(GPCPLL_CFG3_PLL_DFS_TESTOUT_WIDTH); + + clk->uvdet_slope = ADC_SLOPE_UV; + clk->uvdet_offs = ((s32)clk->uv) - data * ADC_SLOPE_UV; + + nvkm_debug(subdev, "calibrated DVFS parameters: offs %d, slope %d\n", + clk->uvdet_offs, clk->uvdet_slope); + +calibrated: + /* Compute and apply initial DVFS parameters */ + gm20b_dvfs_calc_det_coeff(clk, clk->uv, &clk->dvfs); + gm20b_dvfs_program_coeff(clk, 0); + gm20b_dvfs_program_ext_cal(clk, clk->dvfs.dfs_ext_cal); + gm20b_dvfs_program_coeff(clk, clk->dvfs.dfs_coeff); + gm20b_dvfs_program_dfs_detection(clk, &clk->new_dvfs); + + return 0; +} + +/* Forward declaration to detect speedo >=1 in gm20b_clk_init() */ +static const struct nvkm_clk_func gm20b_clk; + static int gm20b_clk_init(struct nvkm_clk *base) { @@ -143,15 +814,56 @@ gm20b_clk_init(struct nvkm_clk *base) struct nvkm_subdev *subdev = &clk->base.subdev; struct nvkm_device *device = subdev->device; int ret; + u32 data; + + /* get out from IDDQ */ + nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0); + nvkm_rd32(device, GPCPLL_CFG); + udelay(5); + + nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, + GPC2CLK_OUT_INIT_VAL); /* Set the global bypass control to VCO */ nvkm_mask(device, BYPASSCTRL_SYS, MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT, 0); + ret = gk20a_clk_setup_slide(clk); + if (ret) + return ret; + + /* If not fused, set RAM SVOP PDP data 0x2, and enable fuse override */ + data = nvkm_rd32(device, 0x021944); + if (!(data & 0x3)) { + data |= 0x2; + nvkm_wr32(device, 0x021944, data); + + data = nvkm_rd32(device, 0x021948); + data |= 0x1; + nvkm_wr32(device, 0x021948, data); + } + + /* Disable idle slow down */ + nvkm_mask(device, 0x20160, 0x003f0000, 0x0); + + /* speedo >= 1? 
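Only NAPLL-capable (speedo >= 1) parts are constructed with the gm20b_clk func table, so the pointer comparison below identifies them without a separate flag.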
*/ + if (clk->base.func == &gm20b_clk) { + struct gm20b_clk *_clk = gm20b_clk(base); + struct nvkm_volt *volt = device->volt; + + /* Get current voltage */ + _clk->uv = nvkm_volt_get(volt); + + /* Initialize DVFS */ + ret = gm20b_clk_init_dvfs(_clk); + if (ret) + return ret; + } + /* Start with lowest frequency */ base->func->calc(base, &base->func->pstates[0].base); - ret = base->func->prog(&clk->base); + ret = base->func->prog(base); if (ret) { nvkm_error(subdev, "cannot initialize clock\n"); return ret; @@ -169,6 +881,7 @@ gm20b_clk_speedo0 = { .prog = gk20a_clk_prog, .tidy = gk20a_clk_tidy, .pstates = gm20b_pstates, + /* Speedo 0 only supports 12 voltages */ .nr_pstates = ARRAY_SIZE(gm20b_pstates) - 1, .domains = { { nv_clk_src_crystal, 0xff }, @@ -177,8 +890,26 @@ gm20b_clk_speedo0 = { }, }; -int -gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +static const struct nvkm_clk_func +gm20b_clk = { + .init = gm20b_clk_init, + .fini = gm20b_clk_fini, + .read = gk20a_clk_read, + .calc = gm20b_clk_calc, + .prog = gm20b_clk_prog, + .tidy = gk20a_clk_tidy, + .pstates = gm20b_pstates, + .nr_pstates = ARRAY_SIZE(gm20b_pstates), + .domains = { + { nv_clk_src_crystal, 0xff }, + { nv_clk_src_gpc, 0xff, 0, "core", GK20A_CLK_GPC_MDIV }, + { nv_clk_src_max }, + }, +}; + +static int +gm20b_clk_new_speedo0(struct nvkm_device *device, int index, + struct nvkm_clk **pclk) { struct gk20a_clk *clk; int ret; @@ -188,11 +919,156 @@ gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) return -ENOMEM; *pclk = &clk->base; - ret = _gk20a_clk_ctor(device, index, &gm20b_clk_speedo0, - &gm20b_pllg_params, clk); + ret = gk20a_clk_ctor(device, index, &gm20b_clk_speedo0, + &gm20b_pllg_params, clk); clk->pl_to_div = pl_to_div; clk->div_to_pl = div_to_pl; return ret; } + +/* FUSE register */ +#define FUSE_RESERVED_CALIB0 0x204 +#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT 0 +#define FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH 4 +#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT 4 +#define FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH 10 +#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT 14 +#define FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH 10 +#define FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT 24 +#define FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH 6 +#define FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT 30 +#define FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH 2 + +static int +gm20b_clk_init_fused_params(struct gm20b_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + u32 val = 0; + u32 rev = 0; + +#if IS_ENABLED(CONFIG_ARCH_TEGRA) + tegra_fuse_readl(FUSE_RESERVED_CALIB0, &val); + rev = (val >> FUSE_RESERVED_CALIB0_FUSE_REV_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_FUSE_REV_WIDTH); +#endif + + /* No fused parameters, we will calibrate later */ + if (rev == 0) + return -EINVAL; + + /* Integer part in mV + fractional part in uV */ + clk->uvdet_slope = ((val >> FUSE_RESERVED_CALIB0_SLOPE_INT_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_SLOPE_INT_WIDTH)) * 1000 + + ((val >> FUSE_RESERVED_CALIB0_SLOPE_FRAC_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_SLOPE_FRAC_WIDTH)); + + /* Integer part in mV + fractional part in 100uV */ + clk->uvdet_offs = ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_INT_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_INTERCEPT_INT_WIDTH)) * 1000 + + ((val >> FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_SHIFT) & + MASK(FUSE_RESERVED_CALIB0_INTERCEPT_FRAC_WIDTH)) * 100; + + nvkm_debug(subdev, "fused calibration data: slope %d, offs %d\n", + clk->uvdet_slope, clk->uvdet_offs); + return 0; +} + +static int 
+gm20b_clk_init_safe_fmax(struct gm20b_clk *clk) +{ + struct nvkm_subdev *subdev = &clk->base.base.subdev; + struct nvkm_volt *volt = subdev->device->volt; + struct nvkm_pstate *pstates = clk->base.base.func->pstates; + int nr_pstates = clk->base.base.func->nr_pstates; + int vmin, id = 0; + u32 fmax = 0; + int i; + + /* find lowest voltage we can use */ + vmin = volt->vid[0].uv; + for (i = 1; i < volt->vid_nr; i++) { + if (volt->vid[i].uv <= vmin) { + vmin = volt->vid[i].uv; + id = volt->vid[i].vid; + } + } + + /* find max frequency at this voltage */ + for (i = 0; i < nr_pstates; i++) + if (pstates[i].base.voltage == id) + fmax = max(fmax, + pstates[i].base.domain[nv_clk_src_gpc]); + + if (!fmax) { + nvkm_error(subdev, "failed to evaluate safe fmax\n"); + return -EINVAL; + } + + /* we are safe at 90% of the max frequency */ + clk->safe_fmax_vmin = fmax * (100 - 10) / 100; + nvkm_debug(subdev, "safe fmax @ vmin = %u Khz\n", clk->safe_fmax_vmin); + + return 0; +} + +int +gm20b_clk_new(struct nvkm_device *device, int index, struct nvkm_clk **pclk) +{ + struct nvkm_device_tegra *tdev = device->func->tegra(device); + struct gm20b_clk *clk; + struct nvkm_subdev *subdev; + struct gk20a_clk_pllg_params *clk_params; + int ret; + + /* Speedo 0 GPUs cannot use noise-aware PLL */ + if (tdev->gpu_speedo_id == 0) + return gm20b_clk_new_speedo0(device, index, pclk); + + /* Speedo >= 1, use NAPLL */ + clk = kzalloc(sizeof(*clk) + sizeof(*clk_params), GFP_KERNEL); + if (!clk) + return -ENOMEM; + *pclk = &clk->base.base; + subdev = &clk->base.base.subdev; + + /* duplicate the clock parameters since we will patch them below */ + clk_params = (void *) (clk + 1); + *clk_params = gm20b_pllg_params; + ret = gk20a_clk_ctor(device, index, &gm20b_clk, clk_params, + &clk->base); + if (ret) + return ret; + + /* + * NAPLL can only work with max_u, clamp the m range so + * gk20a_pllg_calc_mnp always uses it + */ + clk_params->max_m = clk_params->min_m = DIV_ROUND_UP(clk_params->max_u, + (clk->base.parent_rate / KHZ)); + if (clk_params->max_m == 0) { + nvkm_warn(subdev, "cannot use NAPLL, using legacy clock...\n"); + kfree(clk); + return gm20b_clk_new_speedo0(device, index, pclk); + } + + clk->base.pl_to_div = pl_to_div; + clk->base.div_to_pl = div_to_pl; + + clk->dvfs_params = &gm20b_dvfs_params; + + ret = gm20b_clk_init_fused_params(clk); + /* + * we will calibrate during init - should never happen on + * prod parts + */ + if (ret) + nvkm_warn(subdev, "no fused calibration parameters\n"); + + ret = gm20b_clk_init_safe_fmax(clk); + if (ret) + return ret; + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild index 842d5de96d73..edcc157e6ac8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/Kbuild @@ -24,6 +24,8 @@ nvkm-y += nvkm/subdev/fb/gk104.o nvkm-y += nvkm/subdev/fb/gk20a.o nvkm-y += nvkm/subdev/fb/gm107.o nvkm-y += nvkm/subdev/fb/gm200.o +nvkm-y += nvkm/subdev/fb/gp100.o +nvkm-y += nvkm/subdev/fb/gp104.o nvkm-y += nvkm/subdev/fb/ram.o nvkm-y += nvkm/subdev/fb/ramnv04.o @@ -41,6 +43,7 @@ nvkm-y += nvkm/subdev/fb/rammcp77.o nvkm-y += nvkm/subdev/fb/ramgf100.o nvkm-y += nvkm/subdev/fb/ramgk104.o nvkm-y += nvkm/subdev/fb/ramgm107.o +nvkm-y += nvkm/subdev/fb/ramgp100.o nvkm-y += nvkm/subdev/fb/sddr2.o nvkm-y += nvkm/subdev/fb/sddr3.o nvkm-y += nvkm/subdev/fb/gddr3.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c index 
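/*
 * Worked example of the two derivations above, with invented numbers:
 * the "safe" frequency is 90% of the fastest pstate available at the
 * lowest fused voltage, and the NAPLL clamp fixes M so that the PLL
 * reference input stays at max_u. A 38.4 MHz parent is assumed here.
 */
#include <stdio.h>

#define KHZ 1000
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned fmax = 921600;			/* KHz, hypothetical pstate */
	unsigned safe_fmax_vmin = fmax * (100 - 10) / 100;	/* 829440 */

	unsigned parent_rate = 38400000;	/* Hz, assumed oscillator */
	unsigned max_u = 38400;			/* KHz, hypothetical limit */
	unsigned max_m = DIV_ROUND_UP(max_u, parent_rate / KHZ);	/* 1 */

	printf("safe fmax %u KHz, fixed M %u\n", safe_fmax_vmin, max_m);
	return 0;
}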
ce90242b8cce..a7049c041594 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c @@ -25,6 +25,7 @@ #include "ram.h" #include <core/memory.h> +#include <core/option.h> #include <subdev/bios.h> #include <subdev/bios/M0203.h> #include <engine/gr.h> @@ -134,6 +135,10 @@ nvkm_fb_init(struct nvkm_subdev *subdev) if (fb->func->init) fb->func->init(fb); + if (fb->func->init_page) + fb->func->init_page(fb); + if (fb->func->init_unkn) + fb->func->init_unkn(fb); return 0; } @@ -171,6 +176,7 @@ nvkm_fb_ctor(const struct nvkm_fb_func *func, struct nvkm_device *device, nvkm_subdev_ctor(&nvkm_fb, device, index, &fb->subdev); fb->func = func; fb->tile.regions = fb->func->tile.regions; + fb->page = nvkm_longopt(device->cfgopt, "NvFbBigPage", 0); } int diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c index e649ead5ccfc..76433cc66fff 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.c @@ -72,6 +72,22 @@ gf100_fb_oneinit(struct nvkm_fb *fb) } void +gf100_fb_init_page(struct nvkm_fb *fb) +{ + struct nvkm_device *device = fb->subdev.device; + switch (fb->page) { + case 16: + nvkm_mask(device, 0x100c80, 0x00000001, 0x00000001); + break; + case 17: + default: + nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); + fb->page = 17; + break; + } +} + +void gf100_fb_init(struct nvkm_fb *base) { struct gf100_fb *fb = gf100_fb(base); @@ -79,8 +95,6 @@ gf100_fb_init(struct nvkm_fb *base) if (fb->r100c10_page) nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8); - - nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */ } void * @@ -125,6 +139,7 @@ gf100_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gf100_fb_init, + .init_page = gf100_fb_init_page, .intr = gf100_fb_intr, .ram_new = gf100_ram_new, .memtype_valid = gf100_fb_memtype_valid, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h index 2160e5a39c9a..449f431644b3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gf100.h @@ -14,4 +14,6 @@ int gf100_fb_new_(const struct nvkm_fb_func *, struct nvkm_device *, void *gf100_fb_dtor(struct nvkm_fb *); void gf100_fb_init(struct nvkm_fb *); void gf100_fb_intr(struct nvkm_fb *); + +void gp100_fb_init(struct nvkm_fb *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c index b41f0f70038c..4245e2e6e604 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk104.c @@ -29,6 +29,7 @@ gk104_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gf100_fb_init, + .init_page = gf100_fb_init_page, .intr = gf100_fb_intr, .ram_new = gk104_ram_new, .memtype_valid = gf100_fb_memtype_valid, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c index 7306f7dfc3b9..f815fe2bbf08 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gk20a.c @@ -27,7 +27,6 @@ static void gk20a_fb_init(struct nvkm_fb *fb) { struct nvkm_device *device = fb->subdev.device; - nvkm_mask(device, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */ nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->mmu_wr) >> 8); nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->mmu_rd) >> 8); } @@ -36,6 +35,7 @@ static const struct 
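/*
 * Standalone sketch of the big-page plumbing introduced above: the
 * new NvFbBigPage config option stores a page *shift* in fb->page
 * (16 = 64KiB, 17 = 128KiB), and on GF100-class chips init_page()
 * folds it into bit 0 of 0x100c80, defaulting to 128KiB. Only the
 * register value is modelled here, not the MMIO access.
 */
#include <stdint.h>

static uint32_t gf100_pick_page(int *page, uint32_t reg)
{
	switch (*page) {
	case 16:			/* 64KiB large pages */
		reg |= 0x00000001u;
		break;
	case 17:			/* 128KiB large pages (default) */
	default:
		reg &= ~0x00000001u;
		*page = 17;
		break;
	}
	return reg;
}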
nvkm_fb_func gk20a_fb = { .oneinit = gf100_fb_oneinit, .init = gk20a_fb_init, + .init_page = gf100_fb_init_page, .memtype_valid = gf100_fb_memtype_valid, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c index 4869fdb753c9..db699025f546 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm107.c @@ -29,6 +29,7 @@ gm107_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gf100_fb_init, + .init_page = gf100_fb_init_page, .intr = gf100_fb_intr, .ram_new = gm107_ram_new, .memtype_valid = gf100_fb_memtype_valid, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c index 44f5716f64d8..62f653240be3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gm200.c @@ -26,6 +26,24 @@ #include <core/memory.h> +void +gm200_fb_init_page(struct nvkm_fb *fb) +{ + struct nvkm_device *device = fb->subdev.device; + switch (fb->page) { + case 16: + nvkm_mask(device, 0x100c80, 0x00000801, 0x00000001); + break; + case 17: + nvkm_mask(device, 0x100c80, 0x00000801, 0x00000000); + break; + default: + nvkm_mask(device, 0x100c80, 0x00000800, 0x00000800); + fb->page = 0; + break; + } +} + static void gm200_fb_init(struct nvkm_fb *base) { @@ -48,6 +66,7 @@ gm200_fb = { .dtor = gf100_fb_dtor, .oneinit = gf100_fb_oneinit, .init = gm200_fb_init, + .init_page = gm200_fb_init_page, .intr = gf100_fb_intr, .ram_new = gm107_ram_new, .memtype_valid = gf100_fb_memtype_valid, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c new file mode 100644 index 000000000000..98474aec1921 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c @@ -0,0 +1,69 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
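/*
 * The GM200 variant above drives two bits instead of one; the extra
 * 0x800 bit looks like it disables the global big-page override when
 * no NvFbBigPage value was forced (fb->page left at 0), though that
 * reading is an assumption - the register is undocumented. Sketch of
 * the same three-way selection:
 */
#include <stdint.h>

static uint32_t gm200_pick_page(int *page, uint32_t reg)
{
	switch (*page) {
	case 16:				/* force 64KiB */
		reg = (reg & ~0x00000801u) | 0x00000001u;
		break;
	case 17:				/* force 128KiB */
		reg &= ~0x00000801u;
		break;
	default:				/* no global override */
		reg |= 0x00000800u;
		*page = 0;
		break;
	}
	return reg;
}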
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "gf100.h" +#include "ram.h" + +#include <core/memory.h> + +static void +gp100_fb_init_unkn(struct nvkm_fb *base) +{ + struct nvkm_device *device = gf100_fb(base)->base.subdev.device; + nvkm_wr32(device, 0x1fac80, nvkm_rd32(device, 0x100c80)); + nvkm_wr32(device, 0x1facc4, nvkm_rd32(device, 0x100cc4)); + nvkm_wr32(device, 0x1facc8, nvkm_rd32(device, 0x100cc8)); + nvkm_wr32(device, 0x1faccc, nvkm_rd32(device, 0x100ccc)); +} + +void +gp100_fb_init(struct nvkm_fb *base) +{ + struct gf100_fb *fb = gf100_fb(base); + struct nvkm_device *device = fb->base.subdev.device; + + if (fb->r100c10_page) + nvkm_wr32(device, 0x100c10, fb->r100c10 >> 8); + + nvkm_wr32(device, 0x100cc8, nvkm_memory_addr(fb->base.mmu_wr) >> 8); + nvkm_wr32(device, 0x100ccc, nvkm_memory_addr(fb->base.mmu_rd) >> 8); + nvkm_mask(device, 0x100cc4, 0x00060000, + max(nvkm_memory_size(fb->base.mmu_rd) >> 16, (u64)2) << 17); +} + +static const struct nvkm_fb_func +gp100_fb = { + .dtor = gf100_fb_dtor, + .oneinit = gf100_fb_oneinit, + .init = gp100_fb_init, + .init_page = gm200_fb_init_page, + .init_unkn = gp100_fb_init_unkn, + .ram_new = gp100_ram_new, + .memtype_valid = gf100_fb_memtype_valid, +}; + +int +gp100_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +{ + return gf100_fb_new_(&gp100_fb, device, index, pfb); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c new file mode 100644 index 000000000000..92cb71861bec --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp104.c @@ -0,0 +1,43 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "gf100.h" +#include "ram.h" + +#include <core/memory.h> + +static const struct nvkm_fb_func +gp104_fb = { + .dtor = gf100_fb_dtor, + .oneinit = gf100_fb_oneinit, + .init = gp100_fb_init, + .init_page = gm200_fb_init_page, + .ram_new = gp100_ram_new, + .memtype_valid = gf100_fb_memtype_valid, +}; + +int +gp104_fb_new(struct nvkm_device *device, int index, struct nvkm_fb **pfb) +{ + return gf100_fb_new_(&gp104_fb, device, index, pfb); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h index d97d640e60a0..e905d44fa1d5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h @@ -8,6 +8,8 @@ struct nvkm_fb_func { void *(*dtor)(struct nvkm_fb *); int (*oneinit)(struct nvkm_fb *); void (*init)(struct nvkm_fb *); + void (*init_page)(struct nvkm_fb *); + void (*init_unkn)(struct nvkm_fb *); void (*intr)(struct nvkm_fb *); struct { @@ -60,5 +62,8 @@ void nv46_fb_tile_init(struct nvkm_fb *, int i, u32 addr, u32 size, u32 pitch, u32 flags, struct nvkm_fb_tile *); int gf100_fb_oneinit(struct nvkm_fb *); +void gf100_fb_init_page(struct nvkm_fb *); bool gf100_fb_memtype_valid(struct nvkm_fb *, u32); + +void gm200_fb_init_page(struct nvkm_fb *); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h index f816cbf2ced3..b9ec0ae6723a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ram.h @@ -47,4 +47,5 @@ int mcp77_ram_new(struct nvkm_fb *, struct nvkm_ram **); int gf100_ram_new(struct nvkm_fb *, struct nvkm_ram **); int gk104_ram_new(struct nvkm_fb *, struct nvkm_ram **); int gm107_ram_new(struct nvkm_fb *, struct nvkm_ram **); +int gp100_ram_new(struct nvkm_fb *, struct nvkm_ram **); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c new file mode 100644 index 000000000000..f3be408b5e5e --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ramgp100.c @@ -0,0 +1,146 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#include "ram.h" + +#include <subdev/bios.h> +#include <subdev/bios/init.h> +#include <subdev/bios/rammap.h> + +static int +gp100_ram_init(struct nvkm_ram *ram) +{ + struct nvkm_subdev *subdev = &ram->fb->subdev; + struct nvkm_device *device = subdev->device; + struct nvkm_bios *bios = device->bios; + u8 ver, hdr, cnt, len, snr, ssz; + u32 data; + int i; + + /* run a bunch of tables from rammap table. there's actually + * individual pointers for each rammap entry too, but, nvidia + * seem to just run the last two entries' scripts early on in + * their init, and never again.. we'll just run 'em all once + * for now. + * + * i strongly suspect that each script is for a separate mode + * (likely selected by 0x9a065c's lower bits?), and the + * binary driver skips the one that's already been setup by + * the init tables. + */ + data = nvbios_rammapTe(bios, &ver, &hdr, &cnt, &len, &snr, &ssz); + if (!data || hdr < 0x15) + return -EINVAL; + + cnt = nvbios_rd08(bios, data + 0x14); /* guess at count */ + data = nvbios_rd32(bios, data + 0x10); /* guess u32... */ + if (cnt) { + u32 save = nvkm_rd32(device, 0x9a065c) & 0x000000f0; + for (i = 0; i < cnt; i++, data += 4) { + if (i != save >> 4) { + nvkm_mask(device, 0x9a065c, 0x000000f0, i << 4); + nvbios_exec(&(struct nvbios_init) { + .subdev = subdev, + .bios = bios, + .offset = nvbios_rd32(bios, data), + .execute = 1, + }); + } + } + nvkm_mask(device, 0x9a065c, 0x000000f0, save); + } + + nvkm_mask(device, 0x9a0584, 0x11000000, 0x00000000); + nvkm_wr32(device, 0x10ecc0, 0xffffffff); + nvkm_mask(device, 0x9a0160, 0x00000010, 0x00000010); + return 0; +} + +static const struct nvkm_ram_func +gp100_ram_func = { + .init = gp100_ram_init, + .get = gf100_ram_get, + .put = gf100_ram_put, +}; + +int +gp100_ram_new(struct nvkm_fb *fb, struct nvkm_ram **pram) +{ + struct nvkm_ram *ram; + struct nvkm_subdev *subdev = &fb->subdev; + struct nvkm_device *device = subdev->device; + enum nvkm_ram_type type = nvkm_fb_bios_memtype(device->bios); + const u32 rsvd_head = ( 256 * 1024); /* vga memory */ + const u32 rsvd_tail = (1024 * 1024); /* vbios etc */ + u32 fbpa_num = nvkm_rd32(device, 0x022438), fbpa; + u32 fbio_opt = nvkm_rd32(device, 0x021c14); + u64 part, size = 0, comm = ~0ULL; + bool mixed = false; + int ret; + + nvkm_debug(subdev, "022438: %08x\n", fbpa_num); + nvkm_debug(subdev, "021c14: %08x\n", fbio_opt); + for (fbpa = 0; fbpa < fbpa_num; fbpa++) { + if (!(fbio_opt & (1 << fbpa))) { + part = nvkm_rd32(device, 0x90020c + (fbpa * 0x4000)); + nvkm_debug(subdev, "fbpa %02x: %lld MiB\n", fbpa, part); + part = part << 20; + if (part != comm) { + if (comm != ~0ULL) + mixed = true; + comm = min(comm, part); + } + size = size + part; + } + } + + ret = nvkm_ram_new_(&gp100_ram_func, fb, type, size, 0, &ram); + *pram = ram; + if (ret) + return ret; + + nvkm_mm_fini(&ram->vram); + + if (mixed) { + ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT, + ((comm * fbpa_num) - rsvd_head) >> + NVKM_RAM_MM_SHIFT, 1); + if (ret) + return ret; + + ret = nvkm_mm_init(&ram->vram, (0x1000000000ULL + comm) >> + NVKM_RAM_MM_SHIFT, + (size - (comm * fbpa_num) - rsvd_tail) >> + NVKM_RAM_MM_SHIFT, 1); + if (ret) + return ret; + } else { + ret = nvkm_mm_init(&ram->vram, rsvd_head >> NVKM_RAM_MM_SHIFT, + (size - rsvd_head - rsvd_tail) >> + NVKM_RAM_MM_SHIFT, 1); + if (ret) + return ret; + } + + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c index 
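/*
 * Worked example of the mixed-FBPA accounting above, with invented
 * sizes: three active FBPAs of 1024, 1024 and 512 MiB give a total of
 * 2560 MiB, a common slice (comm) of 512 MiB and mixed = true, so the
 * driver maps comm * fbpa_num low and the remainder behind the
 * 0x1000000000 window.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t part[] = { 1024ull << 20, 1024ull << 20, 512ull << 20 };
	uint64_t size = 0, comm = ~0ull;
	bool mixed = false;
	int i;

	for (i = 0; i < 3; i++) {
		if (part[i] != comm) {
			if (comm != ~0ull)
				mixed = true;
			comm = comm < part[i] ? comm : part[i];
		}
		size += part[i];
	}

	/* prints: size 2560 MiB comm 512 MiB mixed 1 */
	printf("size %llu MiB comm %llu MiB mixed %d\n",
	       (unsigned long long)(size >> 20),
	       (unsigned long long)(comm >> 20), mixed);
	return 0;
}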
323c79abe468..41bd5d0f7692 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/iccsense/base.c @@ -276,6 +276,8 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev) struct pwr_rail_t *r = &stbl.rail[i]; struct nvkm_iccsense_rail *rail; struct nvkm_iccsense_sensor *sensor; + int (*read)(struct nvkm_iccsense *, + struct nvkm_iccsense_rail *); if (!r->mode || r->resistor_mohm == 0) continue; @@ -284,31 +286,31 @@ nvkm_iccsense_oneinit(struct nvkm_subdev *subdev) if (!sensor) continue; - rail = kmalloc(sizeof(*rail), GFP_KERNEL); - if (!rail) - return -ENOMEM; - switch (sensor->type) { case NVBIOS_EXTDEV_INA209: if (r->rail != 0) continue; - rail->read = nvkm_iccsense_ina209_read; + read = nvkm_iccsense_ina209_read; break; case NVBIOS_EXTDEV_INA219: if (r->rail != 0) continue; - rail->read = nvkm_iccsense_ina219_read; + read = nvkm_iccsense_ina219_read; break; case NVBIOS_EXTDEV_INA3221: if (r->rail >= 3) continue; - rail->read = nvkm_iccsense_ina3221_read; + read = nvkm_iccsense_ina3221_read; break; default: continue; } + rail = kmalloc(sizeof(*rail), GFP_KERNEL); + if (!rail) + return -ENOMEM; sensor->rail_mask |= 1 << r->rail; + rail->read = read; rail->sensor = sensor; rail->idx = r->rail; rail->mohm = r->resistor_mohm; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild index 932b366598aa..12d6f4f102cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/Kbuild @@ -3,3 +3,4 @@ nvkm-y += nvkm/subdev/ltc/gf100.o nvkm-y += nvkm/subdev/ltc/gk104.o nvkm-y += nvkm/subdev/ltc/gm107.o nvkm-y += nvkm/subdev/ltc/gm200.o +nvkm-y += nvkm/subdev/ltc/gp100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c index c9eb677967a8..4a0fa0a9b802 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gf100.c @@ -23,7 +23,6 @@ */ #include "priv.h" -#include <core/enum.h> #include <subdev/fb.h> #include <subdev/timer.h> @@ -71,7 +70,7 @@ gf100_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth) nvkm_wr32(device, 0x17ea58, depth); } -static const struct nvkm_bitfield +const struct nvkm_bitfield gf100_ltc_lts_intr_name[] = { { 0x00000001, "IDLE_ERROR_IQ" }, { 0x00000002, "IDLE_ERROR_CBC" }, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c index e292f5679418..ec0a3844b2d1 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm107.c @@ -68,18 +68,22 @@ gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth) nvkm_wr32(device, 0x17e34c, depth); } -static void -gm107_ltc_lts_isr(struct nvkm_ltc *ltc, int c, int s) +void +gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s) { struct nvkm_subdev *subdev = <c->subdev; struct nvkm_device *device = subdev->device; - u32 base = 0x140000 + (c * 0x2000) + (s * 0x200); - u32 stat = nvkm_rd32(device, base + 0x00c); + u32 base = 0x140400 + (c * 0x2000) + (s * 0x200); + u32 intr = nvkm_rd32(device, base + 0x00c); + u16 stat = intr & 0x0000ffff; + char msg[128]; if (stat) { - nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat); - nvkm_wr32(device, base + 0x00c, stat); + nvkm_snprintbf(msg, sizeof(msg), gf100_ltc_lts_intr_name, stat); + nvkm_error(subdev, "LTC%d_LTS%d: %08x [%s]\n", c, s, intr, msg); } + + nvkm_wr32(device, base + 0x00c, intr); } void @@ -92,7 
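/*
 * The iccsense reordering above fixes a leak: the old code kmalloc'ed
 * the rail before the switch, so every 'continue' for an unsupported
 * sensor type or rail index dropped the allocation. A minimal
 * standalone version of the resulting allocate-after-validate shape
 * (types pared down to the bare minimum):
 */
#include <stdlib.h>

struct rail {
	int (*read)(void);
	int idx;
};

static struct rail *rail_create(int idx, int (*read_fn)(void))
{
	struct rail *rail;

	if (!read_fn)			/* unsupported: nothing allocated */
		return NULL;

	rail = malloc(sizeof(*rail));	/* allocate only after validation */
	if (!rail)
		return NULL;

	rail->read = read_fn;
	rail->idx = idx;
	return rail;
}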
+96,7 @@ gm107_ltc_intr(struct nvkm_ltc *ltc) while (mask) { u32 s, c = __ffs(mask); for (s = 0; s < ltc->lts_nr; s++) - gm107_ltc_lts_isr(ltc, c, s); + gm107_ltc_intr_lts(ltc, c, s); mask &= ~(1 << c); } } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c index 2a29bfd5125a..e18e0dc19ec8 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gm200.c @@ -46,7 +46,7 @@ static const struct nvkm_ltc_func gm200_ltc = { .oneinit = gm200_ltc_oneinit, .init = gm200_ltc_init, - .intr = gm107_ltc_intr, /*XXX: not validated */ + .intr = gm107_ltc_intr, .cbc_clear = gm107_ltc_cbc_clear, .cbc_wait = gm107_ltc_cbc_wait, .zbc = 16, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c new file mode 100644 index 000000000000..0bdfb2f40266 --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/gp100.c @@ -0,0 +1,75 @@ +/* + * Copyright 2016 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Ben Skeggs + */ +#include "priv.h" + +static void +gp100_ltc_intr(struct nvkm_ltc *ltc) +{ + struct nvkm_device *device = ltc->subdev.device; + u32 mask; + + mask = nvkm_rd32(device, 0x0001c0); + while (mask) { + u32 s, c = __ffs(mask); + for (s = 0; s < ltc->lts_nr; s++) + gm107_ltc_intr_lts(ltc, c, s); + mask &= ~(1 << c); + } +} + +static int +gp100_ltc_oneinit(struct nvkm_ltc *ltc) +{ + struct nvkm_device *device = ltc->subdev.device; + ltc->ltc_nr = nvkm_rd32(device, 0x12006c); + ltc->lts_nr = nvkm_rd32(device, 0x17e280) >> 28; + /*XXX: tagram allocation - TBD */ + return nvkm_mm_init(<c->tags, 0, 0, 1); +} + +static void +gp100_ltc_init(struct nvkm_ltc *ltc) +{ + /*XXX: PMU LS call to setup tagram address */ +} + +static const struct nvkm_ltc_func +gp100_ltc = { + .oneinit = gp100_ltc_oneinit, + .init = gp100_ltc_init, + .intr = gp100_ltc_intr, + .cbc_clear = gm107_ltc_cbc_clear, + .cbc_wait = gm107_ltc_cbc_wait, + .zbc = 16, + .zbc_clear_color = gm107_ltc_zbc_clear_color, + .zbc_clear_depth = gm107_ltc_zbc_clear_depth, + .invalidate = gf100_ltc_invalidate, + .flush = gf100_ltc_flush, +}; + +int +gp100_ltc_new(struct nvkm_device *device, int index, struct nvkm_ltc **pltc) +{ + return nvkm_ltc_new_(&gp100_ltc, device, index, pltc); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h index 6d81c695ed0d..8b95f96e3ffa 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ltc/priv.h @@ -2,6 +2,7 @@ #define __NVKM_LTC_PRIV_H__ #define nvkm_ltc(p) container_of((p), struct nvkm_ltc, subdev) #include <subdev/ltc.h> +#include <core/enum.h> int nvkm_ltc_new_(const struct nvkm_ltc_func *, struct nvkm_device *, int index, struct nvkm_ltc **); @@ -31,8 +32,10 @@ void gf100_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]); void gf100_ltc_zbc_clear_depth(struct nvkm_ltc *, int, const u32); void gf100_ltc_invalidate(struct nvkm_ltc *); void gf100_ltc_flush(struct nvkm_ltc *); +extern const struct nvkm_bitfield gf100_ltc_lts_intr_name[]; void gm107_ltc_intr(struct nvkm_ltc *); +void gm107_ltc_intr_lts(struct nvkm_ltc *, int ltc, int lts); void gm107_ltc_cbc_clear(struct nvkm_ltc *, u32, u32); void gm107_ltc_cbc_wait(struct nvkm_ltc *); void gm107_ltc_zbc_clear_color(struct nvkm_ltc *, int, const u32[4]); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild index 49695ac7be2e..12943f92c206 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/Kbuild @@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/mc/gt215.o nvkm-y += nvkm/subdev/mc/gf100.o nvkm-y += nvkm/subdev/mc/gk104.o nvkm-y += nvkm/subdev/mc/gk20a.o +nvkm-y += nvkm/subdev/mc/gp100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c index 350a8caa84c8..6b25e25f9eba 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/base.c @@ -27,43 +27,67 @@ #include <subdev/top.h> void -nvkm_mc_unk260(struct nvkm_mc *mc, u32 data) +nvkm_mc_unk260(struct nvkm_device *device, u32 data) { - if (mc->func->unk260) + struct nvkm_mc *mc = device->mc; + if (likely(mc) && mc->func->unk260) mc->func->unk260(mc, data); } void -nvkm_mc_intr_unarm(struct nvkm_mc *mc) +nvkm_mc_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx, bool en) { - return mc->func->intr_unarm(mc); + struct nvkm_mc *mc = device->mc; + const struct nvkm_mc_map 
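/*
 * Sketch of the interface change the MC rework above rolls out: the
 * public entry points now take the device, look the subdev up, and
 * tolerate its absence, so callers such as the PCI interrupt handler
 * and secboot no longer open-code NULL checks. Structure names below
 * are stand-ins, not the real nvkm types.
 */
struct mc_like {
	void (*intr_unarm)(struct mc_like *);
};

struct device_like {
	struct mc_like *mc;
};

/* NULL-tolerant device-level wrapper, shaped like nvkm_mc_intr_unarm() */
static void device_intr_unarm(struct device_like *device)
{
	struct mc_like *mc = device->mc;

	if (mc)		/* subdev may be absent on some chips */
		mc->intr_unarm(mc);
}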
*map; + if (likely(mc) && mc->func->intr_mask) { + u32 mask = nvkm_top_intr_mask(device, devidx); + for (map = mc->func->intr; !mask && map->stat; map++) { + if (map->unit == devidx) + mask = map->stat; + } + mc->func->intr_mask(mc, mask, en ? mask : 0); + } +} + +void +nvkm_mc_intr_unarm(struct nvkm_device *device) +{ + struct nvkm_mc *mc = device->mc; + if (likely(mc)) + mc->func->intr_unarm(mc); } void -nvkm_mc_intr_rearm(struct nvkm_mc *mc) +nvkm_mc_intr_rearm(struct nvkm_device *device) { - return mc->func->intr_rearm(mc); + struct nvkm_mc *mc = device->mc; + if (likely(mc)) + mc->func->intr_rearm(mc); } static u32 -nvkm_mc_intr_mask(struct nvkm_mc *mc) +nvkm_mc_intr_stat(struct nvkm_mc *mc) { - u32 intr = mc->func->intr_mask(mc); + u32 intr = mc->func->intr_stat(mc); if (WARN_ON_ONCE(intr == 0xffffffff)) intr = 0; /* likely fallen off the bus */ return intr; } void -nvkm_mc_intr(struct nvkm_mc *mc, bool *handled) +nvkm_mc_intr(struct nvkm_device *device, bool *handled) { - struct nvkm_device *device = mc->subdev.device; + struct nvkm_mc *mc = device->mc; struct nvkm_subdev *subdev; - const struct nvkm_mc_map *map = mc->func->intr; - u32 stat, intr = nvkm_mc_intr_mask(mc); + const struct nvkm_mc_map *map; + u32 stat, intr; u64 subdevs; - stat = nvkm_top_intr(device->top, intr, &subdevs); + if (unlikely(!mc)) + return; + + intr = nvkm_mc_intr_stat(mc); + stat = nvkm_top_intr(device, intr, &subdevs); while (subdevs) { enum nvkm_devidx subidx = __ffs64(subdevs); subdev = nvkm_device_subdev(device, subidx); @@ -72,14 +96,13 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled) subdevs &= ~BIT_ULL(subidx); } - while (map->stat) { + for (map = mc->func->intr; map->stat; map++) { if (intr & map->stat) { subdev = nvkm_device_subdev(device, map->unit); if (subdev) nvkm_subdev_intr(subdev); stat &= ~map->stat; } - map++; } if (stat) @@ -87,22 +110,32 @@ nvkm_mc_intr(struct nvkm_mc *mc, bool *handled) *handled = intr != 0; } -static void -nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx) +static u32 +nvkm_mc_reset_mask(struct nvkm_device *device, bool isauto, + enum nvkm_devidx devidx) { - struct nvkm_device *device = mc->subdev.device; + struct nvkm_mc *mc = device->mc; const struct nvkm_mc_map *map; - u64 pmc_enable; - - if (!(pmc_enable = nvkm_top_reset(device->top, devidx))) { - for (map = mc->func->reset; map && map->stat; map++) { - if (map->unit == devidx) { - pmc_enable = map->stat; - break; + u64 pmc_enable = 0; + if (likely(mc)) { + if (!(pmc_enable = nvkm_top_reset(device, devidx))) { + for (map = mc->func->reset; map && map->stat; map++) { + if (!isauto || !map->noauto) { + if (map->unit == devidx) { + pmc_enable = map->stat; + break; + } + } } } } + return pmc_enable; +} +void +nvkm_mc_reset(struct nvkm_device *device, enum nvkm_devidx devidx) +{ + u64 pmc_enable = nvkm_mc_reset_mask(device, true, devidx); if (pmc_enable) { nvkm_mask(device, 0x000200, pmc_enable, 0x00000000); nvkm_mask(device, 0x000200, pmc_enable, pmc_enable); @@ -111,17 +144,27 @@ nvkm_mc_reset_(struct nvkm_mc *mc, enum nvkm_devidx devidx) } void -nvkm_mc_reset(struct nvkm_mc *mc, enum nvkm_devidx devidx) +nvkm_mc_disable(struct nvkm_device *device, enum nvkm_devidx devidx) { - if (likely(mc)) - nvkm_mc_reset_(mc, devidx); + u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx); + if (pmc_enable) + nvkm_mask(device, 0x000200, pmc_enable, 0x00000000); +} + +void +nvkm_mc_enable(struct nvkm_device *device, enum nvkm_devidx devidx) +{ + u64 pmc_enable = nvkm_mc_reset_mask(device, false, devidx); + if 
(pmc_enable) { + nvkm_mask(device, 0x000200, pmc_enable, pmc_enable); + nvkm_rd32(device, 0x000200); + } } static int nvkm_mc_fini(struct nvkm_subdev *subdev, bool suspend) { - struct nvkm_mc *mc = nvkm_mc(subdev); - nvkm_mc_intr_unarm(mc); + nvkm_mc_intr_unarm(subdev->device); return 0; } @@ -131,7 +174,7 @@ nvkm_mc_init(struct nvkm_subdev *subdev) struct nvkm_mc *mc = nvkm_mc(subdev); if (mc->func->init) mc->func->init(mc); - nvkm_mc_intr_rearm(mc); + nvkm_mc_intr_rearm(subdev->device); return 0; } @@ -148,16 +191,21 @@ nvkm_mc = { .fini = nvkm_mc_fini, }; +void +nvkm_mc_ctor(const struct nvkm_mc_func *func, struct nvkm_device *device, + int index, struct nvkm_mc *mc) +{ + nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev); + mc->func = func; +} + int nvkm_mc_new_(const struct nvkm_mc_func *func, struct nvkm_device *device, int index, struct nvkm_mc **pmc) { struct nvkm_mc *mc; - if (!(mc = *pmc = kzalloc(sizeof(*mc), GFP_KERNEL))) return -ENOMEM; - - nvkm_subdev_ctor(&nvkm_mc, device, index, &mc->subdev); - mc->func = func; + nvkm_mc_ctor(func, device, index, *pmc); return 0; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c index 5c85b47f071d..c3d66ef5dc12 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g84.c @@ -57,7 +57,7 @@ g84_mc = { .intr = g84_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = g84_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c index 0280b43cc10c..93ad4982ce5f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/g98.c @@ -57,7 +57,7 @@ g98_mc = { .intr = g98_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = g98_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c index 8397e223bd43..d2c4d6033abb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gf100.c @@ -76,7 +76,7 @@ gf100_mc_intr_rearm(struct nvkm_mc *mc) } u32 -gf100_mc_intr_mask(struct nvkm_mc *mc) +gf100_mc_intr_stat(struct nvkm_mc *mc) { struct nvkm_device *device = mc->subdev.device; u32 intr0 = nvkm_rd32(device, 0x000100); @@ -85,6 +85,14 @@ gf100_mc_intr_mask(struct nvkm_mc *mc) } void +gf100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat) +{ + struct nvkm_device *device = mc->subdev.device; + nvkm_mask(device, 0x000640, mask, stat); + nvkm_mask(device, 0x000644, mask, stat); +} + +void gf100_mc_unk260(struct nvkm_mc *mc, u32 data) { nvkm_wr32(mc->subdev.device, 0x000260, data); @@ -97,6 +105,7 @@ gf100_mc = { .intr_unarm = gf100_mc_intr_unarm, .intr_rearm = gf100_mc_intr_rearm, .intr_mask = gf100_mc_intr_mask, + .intr_stat = gf100_mc_intr_stat, .reset = gf100_mc_reset, .unk260 = gf100_mc_unk260, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c index 317464212c7d..7b8c6ecad1a5 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk104.c @@ -26,6 +26,7 @@ const struct nvkm_mc_map gk104_mc_reset[] = { { 0x00000100, NVKM_ENGINE_FIFO }, + { 0x00002000, NVKM_SUBDEV_PMU, true }, {} }; @@ -53,6 +54,7 @@ gk104_mc = { .intr_unarm = 
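/*
 * The rename rippling through the per-chip tables here frees the
 * "intr_mask" name for a new job: intr_stat now answers "which
 * interrupts are pending" (the old meaning), while intr_mask gates
 * delivery of specific sources. A standalone model of the gf100-style
 * read-modify-write both mask registers (0x640/0x644) receive:
 */
#include <stdint.h>

/* same effect as nvkm_mask(): bits in 'mask' take the value of 'stat' */
static inline uint32_t rmw(uint32_t reg, uint32_t mask, uint32_t stat)
{
	return (reg & ~mask) | (stat & mask);
}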
gf100_mc_intr_unarm, .intr_rearm = gf100_mc_intr_rearm, .intr_mask = gf100_mc_intr_mask, + .intr_stat = gf100_mc_intr_stat, .reset = gk104_mc_reset, .unk260 = gf100_mc_unk260, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c index 60b044f517ed..ca1bf3279dbe 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gk20a.c @@ -30,6 +30,7 @@ gk20a_mc = { .intr_unarm = gf100_mc_intr_unarm, .intr_rearm = gf100_mc_intr_rearm, .intr_mask = gf100_mc_intr_mask, + .intr_stat = gf100_mc_intr_stat, .reset = gk104_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c new file mode 100644 index 000000000000..4d22f4abd6de --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gp100.c @@ -0,0 +1,103 @@ +/* + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs + */ +#define gp100_mc(p) container_of((p), struct gp100_mc, base) +#include "priv.h" + +struct gp100_mc { + struct nvkm_mc base; + spinlock_t lock; + bool intr; + u32 mask; +}; + +static void +gp100_mc_intr_update(struct gp100_mc *mc) +{ + struct nvkm_device *device = mc->base.subdev.device; + u32 mask = mc->intr ? 
mc->mask : 0, i; + for (i = 0; i < 2; i++) { + nvkm_wr32(device, 0x000180 + (i * 0x04), ~mask); + nvkm_wr32(device, 0x000160 + (i * 0x04), mask); + } +} + +static void +gp100_mc_intr_unarm(struct nvkm_mc *base) +{ + struct gp100_mc *mc = gp100_mc(base); + unsigned long flags; + spin_lock_irqsave(&mc->lock, flags); + mc->intr = false; + gp100_mc_intr_update(mc); + spin_unlock_irqrestore(&mc->lock, flags); +} + +static void +gp100_mc_intr_rearm(struct nvkm_mc *base) +{ + struct gp100_mc *mc = gp100_mc(base); + unsigned long flags; + spin_lock_irqsave(&mc->lock, flags); + mc->intr = true; + gp100_mc_intr_update(mc); + spin_unlock_irqrestore(&mc->lock, flags); +} + +static void +gp100_mc_intr_mask(struct nvkm_mc *base, u32 mask, u32 intr) +{ + struct gp100_mc *mc = gp100_mc(base); + unsigned long flags; + spin_lock_irqsave(&mc->lock, flags); + mc->mask = (mc->mask & ~mask) | intr; + gp100_mc_intr_update(mc); + spin_unlock_irqrestore(&mc->lock, flags); +} + +static const struct nvkm_mc_func +gp100_mc = { + .init = nv50_mc_init, + .intr = gk104_mc_intr, + .intr_unarm = gp100_mc_intr_unarm, + .intr_rearm = gp100_mc_intr_rearm, + .intr_mask = gp100_mc_intr_mask, + .intr_stat = gf100_mc_intr_stat, + .reset = gk104_mc_reset, +}; + +int +gp100_mc_new(struct nvkm_device *device, int index, struct nvkm_mc **pmc) +{ + struct gp100_mc *mc; + + if (!(mc = kzalloc(sizeof(*mc), GFP_KERNEL))) + return -ENOMEM; + nvkm_mc_ctor(&gp100_mc, device, index, &mc->base); + *pmc = &mc->base; + + spin_lock_init(&mc->lock); + mc->intr = false; + mc->mask = 0x7fffffff; + return 0; +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c index aad0ba95bf18..99d50a3d956f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/gt215.c @@ -53,13 +53,20 @@ gt215_mc_intr[] = { {}, }; +static void +gt215_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat) +{ + nvkm_mask(mc->subdev.device, 0x000640, mask, stat); +} + static const struct nvkm_mc_func gt215_mc = { .init = nv50_mc_init, .intr = gt215_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_mask = gt215_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = gt215_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c index a062624e906b..6509defd1460 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv04.c @@ -56,7 +56,7 @@ nv04_mc_intr_rearm(struct nvkm_mc *mc) } u32 -nv04_mc_intr_mask(struct nvkm_mc *mc) +nv04_mc_intr_stat(struct nvkm_mc *mc) { return nvkm_rd32(mc->subdev.device, 0x000100); } @@ -75,7 +75,7 @@ nv04_mc = { .intr = nv04_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = nv04_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c index 55f0b9166b52..9213107901e6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv11.c @@ -39,7 +39,7 @@ nv11_mc = { .intr = nv11_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = nv04_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c index 
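/*
 * GP100 lacks the single global-enable toggle the older unarm/rearm
 * paths relied on, so the code above keeps a software shadow of the
 * state (mc->mask, mc->intr) under a spinlock and replays it into the
 * 0x000160/0x000180 enable registers. A minimal userspace model of
 * the same idea, with a pthread mutex standing in for the spinlock:
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

struct soft_intr {
	pthread_mutex_t lock;
	bool armed;		/* mc->intr: rearm()/unarm() toggles this */
	uint32_t mask;		/* mc->mask: which sources are wanted */
	uint32_t hw;		/* stands in for the hardware enables */
};

static void soft_intr_update(struct soft_intr *si)
{
	si->hw = si->armed ? si->mask : 0;
}

static void soft_intr_mask(struct soft_intr *si, uint32_t clr, uint32_t set)
{
	pthread_mutex_lock(&si->lock);
	si->mask = (si->mask & ~clr) | set;
	soft_intr_update(si);
	pthread_mutex_unlock(&si->lock);
}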
c40fa67f79a5..64bf5bbf8146 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv17.c @@ -48,7 +48,7 @@ nv17_mc = { .intr = nv17_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = nv17_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c index cc56271db564..65fa44a64b98 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv44.c @@ -43,7 +43,7 @@ nv44_mc = { .intr = nv17_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = nv17_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c index 343b6078580d..fe93b4fd7100 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/nv50.c @@ -50,7 +50,7 @@ nv50_mc = { .intr = nv50_mc_intr, .intr_unarm = nv04_mc_intr_unarm, .intr_rearm = nv04_mc_intr_rearm, - .intr_mask = nv04_mc_intr_mask, + .intr_stat = nv04_mc_intr_stat, .reset = nv17_mc_reset, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h index a12038118512..4f0576a06d24 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mc/priv.h @@ -3,12 +3,15 @@ #define nvkm_mc(p) container_of((p), struct nvkm_mc, subdev) #include <subdev/mc.h> +void nvkm_mc_ctor(const struct nvkm_mc_func *, struct nvkm_device *, + int index, struct nvkm_mc *); int nvkm_mc_new_(const struct nvkm_mc_func *, struct nvkm_device *, int index, struct nvkm_mc **); struct nvkm_mc_map { u32 stat; u32 unit; + bool noauto; }; struct nvkm_mc_func { @@ -18,8 +21,10 @@ struct nvkm_mc_func { void (*intr_unarm)(struct nvkm_mc *); /* enable reporting of interrupts to host */ void (*intr_rearm)(struct nvkm_mc *); + /* (un)mask delivery of specific interrupts */ + void (*intr_mask)(struct nvkm_mc *, u32 mask, u32 stat); /* retrieve pending interrupt mask (NV_PMC_INTR) */ - u32 (*intr_mask)(struct nvkm_mc *); + u32 (*intr_stat)(struct nvkm_mc *); const struct nvkm_mc_map *reset; void (*unk260)(struct nvkm_mc *, u32); }; @@ -27,7 +32,7 @@ struct nvkm_mc_func { void nv04_mc_init(struct nvkm_mc *); void nv04_mc_intr_unarm(struct nvkm_mc *); void nv04_mc_intr_rearm(struct nvkm_mc *); -u32 nv04_mc_intr_mask(struct nvkm_mc *); +u32 nv04_mc_intr_stat(struct nvkm_mc *); extern const struct nvkm_mc_map nv04_mc_reset[]; extern const struct nvkm_mc_map nv17_mc_intr[]; @@ -39,7 +44,8 @@ void nv50_mc_init(struct nvkm_mc *); void gf100_mc_intr_unarm(struct nvkm_mc *); void gf100_mc_intr_rearm(struct nvkm_mc *); -u32 gf100_mc_intr_mask(struct nvkm_mc *); +void gf100_mc_intr_mask(struct nvkm_mc *, u32, u32); +u32 gf100_mc_intr_stat(struct nvkm_mc *); void gf100_mc_unk260(struct nvkm_mc *, u32); extern const struct nvkm_mc_map gk104_mc_intr[]; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild index 3c2519fdeb81..2a31b7d66a6d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/Kbuild @@ -10,3 +10,4 @@ nvkm-y += nvkm/subdev/pci/g94.o nvkm-y += nvkm/subdev/pci/gf100.o nvkm-y += nvkm/subdev/pci/gf106.o nvkm-y += nvkm/subdev/pci/gk104.o +nvkm-y += 
nvkm/subdev/pci/gp100.o diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c index 6b0328bd7eed..eb9b278198b2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/base.c @@ -69,15 +69,13 @@ static irqreturn_t nvkm_pci_intr(int irq, void *arg) { struct nvkm_pci *pci = arg; - struct nvkm_mc *mc = pci->subdev.device->mc; + struct nvkm_device *device = pci->subdev.device; bool handled = false; - if (likely(mc)) { - nvkm_mc_intr_unarm(mc); - if (pci->msi) - pci->func->msi_rearm(pci); - nvkm_mc_intr(mc, &handled); - nvkm_mc_intr_rearm(mc); - } + nvkm_mc_intr_unarm(device); + if (pci->msi) + pci->func->msi_rearm(pci); + nvkm_mc_intr(device, &handled); + nvkm_mc_intr_rearm(device); return handled ? IRQ_HANDLED : IRQ_NONE; } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c new file mode 100644 index 000000000000..82c5234a06ff --- /dev/null +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/pci/gp100.c @@ -0,0 +1,44 @@ +/* + * Copyright 2015 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Ben Skeggs <bskeggs@redhat.com> + */ +#include "priv.h" + +static void +gp100_pci_msi_rearm(struct nvkm_pci *pci) +{ + nvkm_pci_wr32(pci, 0x0704, 0x00000000); +} + +static const struct nvkm_pci_func +gp100_pci_func = { + .rd32 = nv40_pci_rd32, + .wr08 = nv40_pci_wr08, + .wr32 = nv40_pci_wr32, + .msi_rearm = gp100_pci_msi_rearm, +}; + +int +gp100_pci_new(struct nvkm_device *device, int index, struct nvkm_pci **ppci) +{ + return nvkm_pci_new_(&gp100_pci_func, device, index, ppci); +} diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c index 213fdba6cfa0..314be2192b7d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/base.c @@ -19,8 +19,9 @@ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ - #include "priv.h" + +#include <subdev/mc.h> #include <subdev/timer.h> static const char * @@ -70,12 +71,11 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb) int ret; /* enable engine */ - nvkm_mask(device, 0x200, sb->enable_mask, sb->enable_mask); - nvkm_rd32(device, 0x200); + nvkm_mc_enable(device, sb->devidx); ret = nvkm_wait_msec(device, 10, sb->base + 0x10c, 0x6, 0x0); if (ret < 0) { - nvkm_mask(device, 0x200, sb->enable_mask, 0x0); nvkm_error(&sb->subdev, "Falcon mem scrubbing timeout\n"); + nvkm_mc_disable(device, sb->devidx); return ret; } @@ -85,8 +85,7 @@ nvkm_secboot_falcon_enable(struct nvkm_secboot *sb) /* enable IRQs */ nvkm_wr32(device, sb->base + 0x010, 0xff); - nvkm_mask(device, 0x640, sb->irq_mask, sb->irq_mask); - nvkm_mask(device, 0x644, sb->irq_mask, sb->irq_mask); + nvkm_mc_intr_mask(device, sb->devidx, true); return 0; } @@ -97,14 +96,13 @@ nvkm_secboot_falcon_disable(struct nvkm_secboot *sb) struct nvkm_device *device = sb->subdev.device; /* disable IRQs and wait for any previous code to complete */ - nvkm_mask(device, 0x644, sb->irq_mask, 0x0); - nvkm_mask(device, 0x640, sb->irq_mask, 0x0); + nvkm_mc_intr_mask(device, sb->devidx, false); nvkm_wr32(device, sb->base + 0x014, 0xff); falcon_wait_idle(device, sb->base); /* disable engine */ - nvkm_mask(device, 0x200, sb->enable_mask, 0x0); + nvkm_mc_disable(device, sb->devidx); return 0; } @@ -216,14 +214,7 @@ nvkm_secboot_oneinit(struct nvkm_subdev *subdev) return ret; } - /* - * Build all blobs - the same blobs can be used to perform secure boot - * multiple times - */ - if (sb->func->prepare_blobs) - ret = sb->func->prepare_blobs(sb); - - return ret; + return 0; } static int @@ -270,9 +261,8 @@ nvkm_secboot_ctor(const struct nvkm_secboot_func *func, /* setup the performing falcon's base address and masks */ switch (func->boot_falcon) { case NVKM_SECBOOT_FALCON_PMU: + sb->devidx = NVKM_SUBDEV_PMU; sb->base = 0x10a000; - sb->irq_mask = 0x1000000; - sb->enable_mask = 0x2000; break; default: nvkm_error(&sb->subdev, "invalid secure boot falcon\n"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c index cc100dc940ea..f1e2dc914366 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm200.c @@ -860,6 +860,8 @@ gm200_secboot_prepare_ls_blob(struct gm200_secboot *gsb) /* Write LS blob */ ret = ls_ucode_mgr_write_wpr(gsb, &mgr, gsb->ls_blob); + if (ret) + nvkm_gpuobj_del(&gsb->ls_blob); cleanup: ls_ucode_mgr_cleanup(&mgr); @@ -1023,29 +1025,34 @@ gm20x_secboot_prepare_blobs(struct gm200_secboot *gsb) int ret; /* Load and prepare the managed falcon's firmwares */ - ret = gm200_secboot_prepare_ls_blob(gsb); - if (ret) - return ret; + if (!gsb->ls_blob) { + ret = gm200_secboot_prepare_ls_blob(gsb); + if (ret) + return ret; + } /* Load the HS firmware that will load the LS firmwares */ - ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load", - &gsb->acr_load_blob, - &gsb->acr_load_bl_desc, true); - if (ret) - return ret; + if (!gsb->acr_load_blob) { + ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_load", + &gsb->acr_load_blob, + &gsb->acr_load_bl_desc, true); + if (ret) + return ret; + } /* Load the HS firmware bootloader */ - ret = gm200_secboot_prepare_hsbl_blob(gsb); - if (ret) - return ret; + if (!gsb->hsbl_blob) { + ret = gm200_secboot_prepare_hsbl_blob(gsb); + if (ret) + return ret; + } return 0; } static int -gm200_secboot_prepare_blobs(struct nvkm_secboot *sb) 
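/*
 * The firmware_ok flag introduced below turns blob preparation from a
 * mandatory oneinit step into a lazy, idempotent one performed on
 * first reset; each prepare_* helper likewise becomes a no-op once
 * its blob exists. The generic shape of that pattern, as a sketch:
 */
#include <stdbool.h>

struct lazy_fw {
	bool ready;
	int (*prepare)(struct lazy_fw *);
};

static int lazy_fw_ensure(struct lazy_fw *fw)
{
	int ret;

	if (fw->ready)		/* already built: cheap early-out */
		return 0;

	ret = fw->prepare(fw);	/* may fail, e.g. firmware files absent */
	if (ret)
		return ret;

	fw->ready = true;	/* never rebuilt after first success */
	return 0;
}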
+gm200_secboot_prepare_blobs(struct gm200_secboot *gsb) { - struct gm200_secboot *gsb = gm200_secboot(sb); int ret; ret = gm20x_secboot_prepare_blobs(gsb); @@ -1053,15 +1060,37 @@ gm200_secboot_prepare_blobs(struct nvkm_secboot *sb) return ret; /* dGPU only: load the HS firmware that unprotects the WPR region */ - ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload", - &gsb->acr_unload_blob, - &gsb->acr_unload_bl_desc, false); - if (ret) - return ret; + if (!gsb->acr_unload_blob) { + ret = gm200_secboot_prepare_hs_blob(gsb, "acr/ucode_unload", + &gsb->acr_unload_blob, + &gsb->acr_unload_bl_desc, false); + if (ret) + return ret; + } return 0; } +static int +gm200_secboot_blobs_ready(struct gm200_secboot *gsb) +{ + struct nvkm_subdev *subdev = &gsb->base.subdev; + int ret; + + /* firmware already loaded, nothing to do... */ + if (gsb->firmware_ok) + return 0; + + ret = gsb->func->prepare_blobs(gsb); + if (ret) { + nvkm_error(subdev, "failed to load secure firmware\n"); + return ret; + } + + gsb->firmware_ok = true; + + return 0; +} /* @@ -1234,6 +1263,11 @@ gm200_secboot_reset(struct nvkm_secboot *sb, enum nvkm_secboot_falcon falcon) struct gm200_secboot *gsb = gm200_secboot(sb); int ret; + /* Make sure all blobs are ready */ + ret = gm200_secboot_blobs_ready(gsb); + if (ret) + return ret; + /* * Dummy GM200 implementation: perform secure boot each time we are * called on FECS. Since only FECS and GPCCS are managed and started @@ -1373,7 +1407,6 @@ gm200_secboot = { .dtor = gm200_secboot_dtor, .init = gm200_secboot_init, .fini = gm200_secboot_fini, - .prepare_blobs = gm200_secboot_prepare_blobs, .reset = gm200_secboot_reset, .start = gm200_secboot_start, .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS) | @@ -1415,6 +1448,7 @@ gm200_secboot_func = { .bl_desc_size = sizeof(struct gm200_flcn_bl_desc), .fixup_bl_desc = gm200_secboot_fixup_bl_desc, .fixup_hs_desc = gm200_secboot_fixup_hs_desc, + .prepare_blobs = gm200_secboot_prepare_blobs, }; int @@ -1487,3 +1521,19 @@ MODULE_FIRMWARE("nvidia/gm206/gr/sw_ctx.bin"); MODULE_FIRMWARE("nvidia/gm206/gr/sw_nonctx.bin"); MODULE_FIRMWARE("nvidia/gm206/gr/sw_bundle_init.bin"); MODULE_FIRMWARE("nvidia/gm206/gr/sw_method_init.bin"); + +MODULE_FIRMWARE("nvidia/gp100/acr/bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/acr/ucode_load.bin"); +MODULE_FIRMWARE("nvidia/gp100/acr/ucode_unload.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_data.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/fecs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_bl.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_inst.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_data.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/gpccs_sig.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_ctx.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_nonctx.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_bundle_init.bin"); +MODULE_FIRMWARE("nvidia/gp100/gr/sw_method_init.bin"); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c index 684320484b70..d5395ebfe8d3 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/gm20b.c @@ -42,6 +42,32 @@ struct gm20b_flcn_bl_desc { u32 data_size; }; +static int +gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb) +{ + struct nvkm_subdev *subdev = &gsb->base.subdev; + int acr_size; + int ret; + + ret = gm20x_secboot_prepare_blobs(gsb); + if (ret) + return ret; + + acr_size 
= gsb->acr_load_blob->size; + /* + * On Tegra the WPR region is set by the bootloader. It is illegal for + * the HS blob to be larger than this region. + */ + if (acr_size > gsb->wpr_size) { + nvkm_error(subdev, "WPR region too small for FW blob!\n"); + nvkm_error(subdev, "required: %dB\n", acr_size); + nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size); + return -ENOSPC; + } + + return 0; +} + /** * gm20b_secboot_fixup_bl_desc - adapt BL descriptor to format used by GM20B FW * @@ -88,6 +114,7 @@ gm20b_secboot_func = { .bl_desc_size = sizeof(struct gm20b_flcn_bl_desc), .fixup_bl_desc = gm20b_secboot_fixup_bl_desc, .fixup_hs_desc = gm20b_secboot_fixup_hs_desc, + .prepare_blobs = gm20b_secboot_prepare_blobs, }; @@ -147,32 +174,6 @@ gm20b_tegra_read_wpr(struct gm200_secboot *gsb) #endif static int -gm20b_secboot_prepare_blobs(struct nvkm_secboot *sb) -{ - struct gm200_secboot *gsb = gm200_secboot(sb); - int acr_size; - int ret; - - ret = gm20x_secboot_prepare_blobs(gsb); - if (ret) - return ret; - - acr_size = gsb->acr_load_blob->size; - /* - * On Tegra the WPR region is set by the bootloader. It is illegal for - * the HS blob to be larger than this region. - */ - if (acr_size > gsb->wpr_size) { - nvkm_error(&sb->subdev, "WPR region too small for FW blob!\n"); - nvkm_error(&sb->subdev, "required: %dB\n", acr_size); - nvkm_error(&sb->subdev, "WPR size: %dB\n", gsb->wpr_size); - return -ENOSPC; - } - - return 0; -} - -static int gm20b_secboot_init(struct nvkm_secboot *sb) { struct gm200_secboot *gsb = gm200_secboot(sb); @@ -189,7 +190,6 @@ static const struct nvkm_secboot_func gm20b_secboot = { .dtor = gm200_secboot_dtor, .init = gm20b_secboot_init, - .prepare_blobs = gm20b_secboot_prepare_blobs, .reset = gm200_secboot_reset, .start = gm200_secboot_start, .managed_falcons = BIT(NVKM_SECBOOT_FALCON_FECS), diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h index f2b09dee7c5d..a9a8a0e1017e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/secboot/priv.h @@ -30,7 +30,6 @@ struct nvkm_secboot_func { int (*init)(struct nvkm_secboot *); int (*fini)(struct nvkm_secboot *, bool suspend); void *(*dtor)(struct nvkm_secboot *); - int (*prepare_blobs)(struct nvkm_secboot *); int (*reset)(struct nvkm_secboot *, enum nvkm_secboot_falcon); int (*start)(struct nvkm_secboot *, enum nvkm_secboot_falcon); @@ -147,10 +146,8 @@ struct hsflcn_acr_desc { * @inst: instance block for HS falcon * @pgd: page directory for the HS falcon * @vm: address space used by the HS falcon - * @bl_desc_size: size of the BL descriptor used by this chip. - * @fixup_bl_desc: hook that generates the proper BL descriptor format from - * the generic GM200 format into a data array of size - * bl_desc_size + * @falcon_state: current state of the managed falcons + * @firmware_ok: whether the firmware blobs have been created */ struct gm200_secboot { struct nvkm_secboot base; @@ -196,9 +193,19 @@ struct gm200_secboot { RUNNING, } falcon_state[NVKM_SECBOOT_FALCON_END]; + bool firmware_ok; }; #define gm200_secboot(sb) container_of(sb, struct gm200_secboot, base) +/** + * Contains functions we wish to abstract between GM200-like implementations + * @bl_desc_size: size of the BL descriptor used by this chip. 
+ * @fixup_bl_desc: hook that generates the proper BL descriptor format from + * the generic GM200 format into a data array of size + * bl_desc_size + * @fixup_hs_desc: hook that twiddles the HS descriptor before it is used + * @prepare_blobs: prepares the various blobs needed for secure booting + */ struct gm200_secboot_func { /* * Size of the bootloader descriptor for this chip. A block of this @@ -214,6 +221,7 @@ struct gm200_secboot_func { * we want the HS FW to set up. */ void (*fixup_hs_desc)(struct gm200_secboot *, struct hsflcn_acr_desc *); + int (*prepare_blobs)(struct gm200_secboot *); }; int gm200_secboot_init(struct nvkm_secboot *); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c index a1b264664aad..fe063d5728e2 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/base.c @@ -41,8 +41,9 @@ nvkm_top_device_new(struct nvkm_top *top) } u32 -nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index) +nvkm_top_reset(struct nvkm_device *device, enum nvkm_devidx index) { + struct nvkm_top *top = device->top; struct nvkm_top_device *info; if (top) { @@ -56,8 +57,25 @@ nvkm_top_reset(struct nvkm_top *top, enum nvkm_devidx index) } u32 -nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs) +nvkm_top_intr_mask(struct nvkm_device *device, enum nvkm_devidx devidx) { + struct nvkm_top *top = device->top; + struct nvkm_top_device *info; + + if (top) { + list_for_each_entry(info, &top->device, head) { + if (info->index == devidx && info->intr >= 0) + return BIT(info->intr); + } + } + + return 0; +} + +u32 +nvkm_top_intr(struct nvkm_device *device, u32 intr, u64 *psubdevs) +{ + struct nvkm_top *top = device->top; struct nvkm_top_device *info; u64 subdevs = 0; u32 handled = 0; @@ -78,8 +96,9 @@ nvkm_top_intr(struct nvkm_top *top, u32 intr, u64 *psubdevs) } enum nvkm_devidx -nvkm_top_fault(struct nvkm_top *top, int fault) +nvkm_top_fault(struct nvkm_device *device, int fault) { + struct nvkm_top *top = device->top; struct nvkm_top_device *info; list_for_each_entry(info, &top->device, head) { @@ -91,8 +110,9 @@ nvkm_top_fault(struct nvkm_top *top, int fault) } enum nvkm_devidx -nvkm_top_engine(struct nvkm_top *top, int index, int *runl, int *engn) +nvkm_top_engine(struct nvkm_device *device, int index, int *runl, int *engn) { + struct nvkm_top *top = device->top; struct nvkm_top_device *info; int n = 0; diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c index e06acc340e99..efac3402f9dd 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/top/gk104.c @@ -29,7 +29,7 @@ gk104_top_oneinit(struct nvkm_top *top) struct nvkm_subdev *subdev = &top->subdev; struct nvkm_device *device = subdev->device; struct nvkm_top_device *info = NULL; - u32 data, type; + u32 data, type, inst; int i; for (i = 0; i < 64; i++) { @@ -37,6 +37,7 @@ gk104_top_oneinit(struct nvkm_top *top) if (!(info = nvkm_top_device_new(top))) return -ENOMEM; type = ~0; + inst = 0; } data = nvkm_rd32(device, 0x022700 + (i * 0x04)); @@ -45,6 +46,7 @@ gk104_top_oneinit(struct nvkm_top *top) case 0x00000000: /* NOT_VALID */ continue; case 0x00000001: /* DATA */ + inst = (data & 0x3c000000) >> 26; info->addr = (data & 0x00fff000); info->fault = (data & 0x000000f8) >> 3; break; @@ -67,27 +69,32 @@ gk104_top_oneinit(struct nvkm_top *top) continue; /* Translate engine type to NVKM engine identifier. 
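A_() binds an entry with instance 0 to a fixed engine enum, while B_() maps instance N to NVKM_ENGINE_<X>0 + N, ignoring instances beyond NVKM_ENGINE_<X>_LAST.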
*/ +#define A_(A) if (inst == 0) info->index = NVKM_ENGINE_##A +#define B_(A) if (inst + NVKM_ENGINE_##A##0 < NVKM_ENGINE_##A##_LAST + 1) \ + info->index = NVKM_ENGINE_##A##0 + inst switch (type) { - case 0x00000000: info->index = NVKM_ENGINE_GR; break; - case 0x00000001: info->index = NVKM_ENGINE_CE0; break; - case 0x00000002: info->index = NVKM_ENGINE_CE1; break; - case 0x00000003: info->index = NVKM_ENGINE_CE2; break; - case 0x00000008: info->index = NVKM_ENGINE_MSPDEC; break; - case 0x00000009: info->index = NVKM_ENGINE_MSPPP; break; - case 0x0000000a: info->index = NVKM_ENGINE_MSVLD; break; - case 0x0000000b: info->index = NVKM_ENGINE_MSENC; break; - case 0x0000000c: info->index = NVKM_ENGINE_VIC; break; - case 0x0000000d: info->index = NVKM_ENGINE_SEC; break; - case 0x0000000e: info->index = NVKM_ENGINE_NVENC0; break; - case 0x0000000f: info->index = NVKM_ENGINE_NVENC1; break; - case 0x00000010: info->index = NVKM_ENGINE_NVDEC; break; + case 0x00000000: A_(GR ); break; + case 0x00000001: A_(CE0 ); break; + case 0x00000002: A_(CE1 ); break; + case 0x00000003: A_(CE2 ); break; + case 0x00000008: A_(MSPDEC); break; + case 0x00000009: A_(MSPPP ); break; + case 0x0000000a: A_(MSVLD ); break; + case 0x0000000b: A_(MSENC ); break; + case 0x0000000c: A_(VIC ); break; + case 0x0000000d: A_(SEC ); break; + case 0x0000000e: B_(NVENC ); break; + case 0x0000000f: A_(NVENC1); break; + case 0x00000010: A_(NVDEC ); break; + case 0x00000013: B_(CE ); break; break; default: break; } - nvkm_debug(subdev, "%02x (%8s): addr %06x fault %2d engine %2d " - "runlist %2d intr %2d reset %2d\n", type, + nvkm_debug(subdev, "%02x.%d (%8s): addr %06x fault %2d " + "engine %2d runlist %2d intr %2d " + "reset %2d\n", type, inst, info->index == NVKM_SUBDEV_NR ? NULL : nvkm_subdev_name[info->index], info->addr, info->fault, info->engine, info->runlist, diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c index 6b2d7531a7ff..1c3d23b0e84a 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/base.c @@ -120,6 +120,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt) data = nvbios_volt_parse(bios, &ver, &hdr, &cnt, &len, &info); if (data && info.vidmask && info.base && info.step) { + volt->min_uv = info.min; + volt->max_uv = info.max; for (i = 0; i < info.vidmask + 1; i++) { if (info.base >= info.min && info.base <= info.max) { @@ -131,6 +133,8 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt) } volt->vid_mask = info.vidmask; } else if (data && info.vidmask) { + volt->min_uv = 0xffffffff; + volt->max_uv = 0; for (i = 0; i < cnt; i++) { data = nvbios_volt_entry_parse(bios, i, &ver, &hdr, &ivid); @@ -138,9 +142,14 @@ nvkm_volt_parse_bios(struct nvkm_bios *bios, struct nvkm_volt *volt) volt->vid[volt->vid_nr].uv = ivid.voltage; volt->vid[volt->vid_nr].vid = ivid.vid; volt->vid_nr++; + volt->min_uv = min(volt->min_uv, ivid.voltage); + volt->max_uv = max(volt->max_uv, ivid.voltage); } } volt->vid_mask = info.vidmask; + } else if (data && info.type == NVBIOS_VOLT_PWM) { + volt->min_uv = info.base; + volt->max_uv = info.base + info.pwm_range; } } @@ -181,8 +190,11 @@ nvkm_volt_ctor(const struct nvkm_volt_func *func, struct nvkm_device *device, volt->func = func; /* Assuming the non-bios device should build the voltage table later */ - if (bios) + if (bios) { nvkm_volt_parse_bios(bios, volt); + nvkm_debug(&volt->subdev, "min: %iuv max: %iuv\n", + volt->min_uv, volt->max_uv); + 
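/*
 * The range logged above comes straight out of nvkm_volt_parse_bios():
 * the computed-table case copies info.min/info.max from the BIOS, the
 * entry-table case folds min()/max() over every VID entry starting from
 * the 0xffffffff/0 sentinels, roughly
 *
 *	min_uv = 0xffffffff; max_uv = 0;
 *	for (i = 0; i < cnt; i++) {
 *		min_uv = min(min_uv, entry[i].voltage);
 *		max_uv = max(max_uv, entry[i].voltage);
 *	}
 *
 * and the PWM case derives them as info.base and
 * info.base + info.pwm_range.
 */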
} if (volt->vid_nr) { for (i = 0; i < volt->vid_nr; i++) { diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c index d554455326da..ce5d83cdc7cf 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.c @@ -77,18 +77,19 @@ gk20a_volt_get_cvb_t_voltage(int speedo, int temp, int s_scale, int t_scale, return mv; } -int +static int gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo) { + static const int v_scale = 1000; int mv; mv = gk20a_volt_get_cvb_t_voltage(speedo, -10, 100, 10, coef); - mv = DIV_ROUND_UP(mv, 1000); + mv = DIV_ROUND_UP(mv, v_scale); return mv * 1000; } -int +static int gk20a_volt_vid_get(struct nvkm_volt *base) { struct gk20a_volt *volt = gk20a_volt(base); @@ -103,7 +104,7 @@ gk20a_volt_vid_get(struct nvkm_volt *base) return -EINVAL; } -int +static int gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid) { struct gk20a_volt *volt = gk20a_volt(base); @@ -113,7 +114,7 @@ gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid) return regulator_set_voltage(volt->vdd, volt->base.vid[vid].uv, 1200000); } -int +static int gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition) { struct gk20a_volt *volt = gk20a_volt(base); @@ -143,9 +144,9 @@ gk20a_volt = { }; int -_gk20a_volt_ctor(struct nvkm_device *device, int index, - const struct cvb_coef *coefs, int nb_coefs, - struct gk20a_volt *volt) +gk20a_volt_ctor(struct nvkm_device *device, int index, + const struct cvb_coef *coefs, int nb_coefs, + int vmin, struct gk20a_volt *volt) { struct nvkm_device_tegra *tdev = device->func->tegra(device); int i, uv; @@ -160,9 +161,9 @@ _gk20a_volt_ctor(struct nvkm_device *device, int index, volt->base.vid_nr = nb_coefs; for (i = 0; i < volt->base.vid_nr; i++) { volt->base.vid[i].vid = i; - volt->base.vid[i].uv = - gk20a_volt_calc_voltage(&coefs[i], - tdev->gpu_speedo); + volt->base.vid[i].uv = max( + gk20a_volt_calc_voltage(&coefs[i], tdev->gpu_speedo), + vmin); nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i, volt->base.vid[i].vid, volt->base.vid[i].uv); } @@ -180,6 +181,6 @@ gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) return -ENOMEM; *pvolt = &volt->base; - return _gk20a_volt_ctor(device, index, gk20a_cvb_coef, - ARRAY_SIZE(gk20a_cvb_coef), volt); + return gk20a_volt_ctor(device, index, gk20a_cvb_coef, + ARRAY_SIZE(gk20a_cvb_coef), 0, volt); } diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h index 0fa3b502bcf8..6a6c97f9684e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gk20a.h @@ -37,13 +37,8 @@ struct gk20a_volt { struct regulator *vdd; }; -int _gk20a_volt_ctor(struct nvkm_device *device, int index, - const struct cvb_coef *coefs, int nb_coefs, - struct gk20a_volt *volt); - -int gk20a_volt_calc_voltage(const struct cvb_coef *coef, int speedo); -int gk20a_volt_vid_get(struct nvkm_volt *volt); -int gk20a_volt_vid_set(struct nvkm_volt *volt, u8 vid); -int gk20a_volt_set_id(struct nvkm_volt *volt, u8 id, int condition); +int gk20a_volt_ctor(struct nvkm_device *device, int index, + const struct cvb_coef *coefs, int nb_coefs, + int vmin, struct gk20a_volt *volt); #endif diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c index 49b5ecb701e4..74db4d28930f 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/subdev/volt/gm20b.c @@ -41,16 +41,52 @@ const struct cvb_coef gm20b_cvb_coef[] = { /* 921600 */ { 2647676, -106455, 1632 }, }; +static const struct cvb_coef gm20b_na_cvb_coef[] = { + /* KHz, c0, c1, c2, c3, c4, c5 */ + /* 76800 */ { 814294, 8144, -940, 808, -21583, 226 }, + /* 153600 */ { 856185, 8144, -940, 808, -21583, 226 }, + /* 230400 */ { 898077, 8144, -940, 808, -21583, 226 }, + /* 307200 */ { 939968, 8144, -940, 808, -21583, 226 }, + /* 384000 */ { 981860, 8144, -940, 808, -21583, 226 }, + /* 460800 */ { 1023751, 8144, -940, 808, -21583, 226 }, + /* 537600 */ { 1065642, 8144, -940, 808, -21583, 226 }, + /* 614400 */ { 1107534, 8144, -940, 808, -21583, 226 }, + /* 691200 */ { 1149425, 8144, -940, 808, -21583, 226 }, + /* 768000 */ { 1191317, 8144, -940, 808, -21583, 226 }, + /* 844800 */ { 1233208, 8144, -940, 808, -21583, 226 }, + /* 921600 */ { 1275100, 8144, -940, 808, -21583, 226 }, + /* 998400 */ { 1316991, 8144, -940, 808, -21583, 226 }, +}; + +const u32 speedo_to_vmin[] = { + /* 0, 1, 2, 3, 4, */ + 950000, 840000, 818750, 840000, 810000, +}; + int gm20b_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) { + struct nvkm_device_tegra *tdev = device->func->tegra(device); struct gk20a_volt *volt; + u32 vmin; + + if (tdev->gpu_speedo_id >= ARRAY_SIZE(speedo_to_vmin)) { + nvdev_error(device, "unsupported speedo %d\n", + tdev->gpu_speedo_id); + return -EINVAL; + } volt = kzalloc(sizeof(*volt), GFP_KERNEL); if (!volt) return -ENOMEM; *pvolt = &volt->base; - return _gk20a_volt_ctor(device, index, gm20b_cvb_coef, - ARRAY_SIZE(gm20b_cvb_coef), volt); + vmin = speedo_to_vmin[tdev->gpu_speedo_id]; + + if (tdev->gpu_speedo_id >= 1) + return gk20a_volt_ctor(device, index, gm20b_na_cvb_coef, + ARRAY_SIZE(gm20b_na_cvb_coef), vmin, volt); + else + return gk20a_volt_ctor(device, index, gm20b_cvb_coef, + ARRAY_SIZE(gm20b_cvb_coef), vmin, volt); } diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c index 6f45e9d00b41..e1be5e795cd8 100644 --- a/drivers/gpu/drm/omapdrm/dss/dsi.c +++ b/drivers/gpu/drm/omapdrm/dss/dsi.c @@ -1167,7 +1167,6 @@ static int dsi_regulator_init(struct platform_device *dsidev) { struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); struct regulator *vdds_dsi; - int r; if (dsi->vdds_dsi_reg != NULL) return 0; diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c index 061f9bab4c9b..0c0a5139a301 100644 --- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c +++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c @@ -120,7 +120,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data) static int hdmi_init_regulator(void) { - int r; struct regulator *reg; if (hdmi.vdda_reg != NULL) diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index b5d4b41361bd..04270f5d110c 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -203,7 +203,7 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush) { if (!qxl_check_idle(qdev->release_ring)) { - queue_work(qdev->gc_queue, &qdev->gc_work); + schedule_work(&qdev->gc_work); if (flush) flush_work(&qdev->gc_work); return true; diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 56e1d633875e..ffe885395145 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -37,7 +37,6 @@ static int alloc_clips(struct qxl_device *qdev, * the qxl_clip_rects. 
This is *not* the same as the memory allocated * on the device, it is offset to qxl_clip_rects.chunk.data */ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, - struct qxl_drawable *drawable, unsigned num_clips, struct qxl_bo *clips_bo) { @@ -136,6 +135,8 @@ static int qxl_palette_create_1bit(struct qxl_bo *palette_bo, * correctly globaly, since that would require * tracking all of our palettes. */ ret = qxl_bo_kmap(palette_bo, (void **)&pal); + if (ret) + return ret; pal->num_ents = 2; pal->unique = unique++; if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { @@ -349,7 +350,7 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, if (ret) goto out_release_backoff; - rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo); + rects = drawable_set_clipping(qdev, num_clips, clips_bo); if (!rects) goto out_release_backoff; diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 3ad6604b34ce..8e633caa4078 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -321,7 +321,6 @@ struct qxl_device { struct qxl_bo *current_release_bo[3]; int current_release_bo_offset[3]; - struct workqueue_struct *gc_queue; struct work_struct gc_work; struct drm_property *hotplug_mode_update_property; diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c index 2319800b7add..e642242728c0 100644 --- a/drivers/gpu/drm/qxl/qxl_kms.c +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -258,7 +258,6 @@ static int qxl_device_init(struct qxl_device *qdev, (unsigned long)qdev->surfaceram_size); - qdev->gc_queue = create_singlethread_workqueue("qxl_gc"); INIT_WORK(&qdev->gc_work, qxl_gc_work); return 0; @@ -270,10 +269,7 @@ static void qxl_device_fini(struct qxl_device *qdev) qxl_bo_unref(&qdev->current_release_bo[0]); if (qdev->current_release_bo[1]) qxl_bo_unref(&qdev->current_release_bo[1]); - flush_workqueue(qdev->gc_queue); - destroy_workqueue(qdev->gc_queue); - qdev->gc_queue = NULL; - + flush_work(&qdev->gc_work); qxl_ring_free(qdev->command_ring); qxl_ring_free(qdev->cursor_ring); qxl_ring_free(qdev->release_ring); @@ -310,10 +306,6 @@ int qxl_driver_load(struct drm_device *dev, unsigned long flags) struct qxl_device *qdev; int r; - /* require kms */ - if (!drm_core_check_feature(dev, DRIVER_MODESET)) - return -ENODEV; - qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL); if (qdev == NULL) return -ENOMEM; diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 0738d74c8d04..d50c9679e631 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -350,11 +350,19 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_mem_reg *old_mem = &bo->mem; + int ret; + + ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); + if (ret) + return ret; + + if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { qxl_move_null(bo, new_mem); return 0; } - return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + return ttm_bo_move_memcpy(bo, evict, interruptible, + no_wait_gpu, new_mem); } static void qxl_bo_move_notify(struct ttm_buffer_object *bo, diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index e91763d5d800..a97abc8af657 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -589,7 +589,8 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE61(rdev) || ASIC_IS_DCE8(rdev)) radeon_crtc->pll_flags 
|= RADEON_PLL_USE_FRAC_FB_DIV; /* use frac fb div on RS780/RS880 */ - if ((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) + if (((rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880)) + && !radeon_crtc->ss_enabled) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; if (ASIC_IS_DCE32(rdev) && mode->clock > 165000) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; @@ -626,7 +627,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, if (radeon_crtc->ss.refdiv) { radeon_crtc->pll_flags |= RADEON_PLL_USE_REF_DIV; radeon_crtc->pll_reference_div = radeon_crtc->ss.refdiv; - if (ASIC_IS_AVIVO(rdev)) + if (rdev->family >= CHIP_RV770) radeon_crtc->pll_flags |= RADEON_PLL_USE_FRAC_FB_DIV; } } diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index ba192a35c607..0c1b9ff433af 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -53,6 +53,7 @@ MODULE_FIRMWARE("radeon/bonaire_mc.bin"); MODULE_FIRMWARE("radeon/bonaire_rlc.bin"); MODULE_FIRMWARE("radeon/bonaire_sdma.bin"); MODULE_FIRMWARE("radeon/bonaire_smc.bin"); +MODULE_FIRMWARE("radeon/bonaire_k_smc.bin"); MODULE_FIRMWARE("radeon/HAWAII_pfp.bin"); MODULE_FIRMWARE("radeon/HAWAII_me.bin"); @@ -72,6 +73,7 @@ MODULE_FIRMWARE("radeon/hawaii_mc.bin"); MODULE_FIRMWARE("radeon/hawaii_rlc.bin"); MODULE_FIRMWARE("radeon/hawaii_sdma.bin"); MODULE_FIRMWARE("radeon/hawaii_smc.bin"); +MODULE_FIRMWARE("radeon/hawaii_k_smc.bin"); MODULE_FIRMWARE("radeon/KAVERI_pfp.bin"); MODULE_FIRMWARE("radeon/KAVERI_me.bin"); @@ -1990,12 +1992,17 @@ static int cik_init_microcode(struct radeon_device *rdev) int new_fw = 0; int err; int num_fw; + bool new_smc = false; DRM_DEBUG("\n"); switch (rdev->family) { case CHIP_BONAIRE: chip_name = "BONAIRE"; + if ((rdev->pdev->revision == 0x80) || + (rdev->pdev->revision == 0x81) || + (rdev->pdev->device == 0x665f)) + new_smc = true; new_chip_name = "bonaire"; pfp_req_size = CIK_PFP_UCODE_SIZE * 4; me_req_size = CIK_ME_UCODE_SIZE * 4; @@ -2010,6 +2017,8 @@ static int cik_init_microcode(struct radeon_device *rdev) break; case CHIP_HAWAII: chip_name = "HAWAII"; + if (rdev->pdev->revision == 0x80) + new_smc = true; new_chip_name = "hawaii"; pfp_req_size = CIK_PFP_UCODE_SIZE * 4; me_req_size = CIK_ME_UCODE_SIZE * 4; @@ -2259,7 +2268,10 @@ static int cik_init_microcode(struct radeon_device *rdev) } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); + if (new_smc) + snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); if (err) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); @@ -8354,7 +8366,8 @@ static int cik_startup(struct radeon_device *rdev) } } rdev->rlc.cs_data = ci_cs_data; - rdev->rlc.cp_table_size = CP_ME_TABLE_SIZE * 5 * 4; + rdev->rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */ + rdev->rlc.cp_table_size += 64 * 1024; /* GDS */ r = sumo_rlc_init(rdev); if (r) { DRM_ERROR("Failed to init rlc BOs!\n"); diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c index 0d3f744de35a..d960d3915408 100644 --- a/drivers/gpu/drm/radeon/evergreen_cs.c +++ b/drivers/gpu/drm/radeon/evergreen_cs.c @@ -2209,6 +2209,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p, } break; } + case PACKET3_PFP_SYNC_ME: + if (pkt->count) { + DRM_ERROR("bad PFP_SYNC_ME\n"); + return -EINVAL; + } + break; case 
PACKET3_SURFACE_SYNC: if (pkt->count != 3) { DRM_ERROR("bad SURFACE_SYNC\n"); @@ -3381,6 +3387,7 @@ static int evergreen_vm_packet3_check(struct radeon_device *rdev, case PACKET3_MPEG_INDEX: case PACKET3_WAIT_REG_MEM: case PACKET3_MEM_WRITE: + case PACKET3_PFP_SYNC_ME: case PACKET3_SURFACE_SYNC: case PACKET3_EVENT_WRITE: case PACKET3_EVENT_WRITE_EOP: diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 0b174e14e9a6..c8e3d394cde7 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -1624,6 +1624,7 @@ */ # define PACKET3_CP_DMA_CMD_SAIC (1 << 28) # define PACKET3_CP_DMA_CMD_DAIC (1 << 29) +#define PACKET3_PFP_SYNC_ME 0x42 #define PACKET3_SURFACE_SYNC 0x43 # define PACKET3_CB0_DEST_BASE_ENA (1 << 6) # define PACKET3_CB1_DEST_BASE_ENA (1 << 7) diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c index 95f4fea89302..86dcdf38b732 100644 --- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c +++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/acpi.h> #include <linux/pci.h> +#include <linux/delay.h> #include "radeon_acpi.h" @@ -27,6 +28,7 @@ struct radeon_atpx_functions { struct radeon_atpx { acpi_handle handle; struct radeon_atpx_functions functions; + bool is_hybrid; }; static struct radeon_atpx_priv { @@ -62,6 +64,14 @@ bool radeon_has_atpx(void) { return radeon_atpx_priv.atpx_detected; } +bool radeon_has_atpx_dgpu_power_cntl(void) { + return radeon_atpx_priv.atpx.functions.power_cntl; +} + +bool radeon_is_atpx_hybrid(void) { + return radeon_atpx_priv.atpx.is_hybrid; +} + /** * radeon_atpx_call - call an ATPX method * @@ -141,18 +151,12 @@ static void radeon_atpx_parse_functions(struct radeon_atpx_functions *f, u32 mas */ static int radeon_atpx_validate(struct radeon_atpx *atpx) { - /* make sure required functions are enabled */ - /* dGPU power control is required */ - if (atpx->functions.power_cntl == false) { - printk("ATPX dGPU power cntl not present, forcing\n"); - atpx->functions.power_cntl = true; - } + u32 valid_bits = 0; if (atpx->functions.px_params) { union acpi_object *info; struct atpx_px_params output; size_t size; - u32 valid_bits; info = radeon_atpx_call(atpx->handle, ATPX_FUNCTION_GET_PX_PARAMETERS, NULL); if (!info) @@ -171,19 +175,42 @@ static int radeon_atpx_validate(struct radeon_atpx *atpx) memcpy(&output, info->buffer.pointer, size); valid_bits = output.flags & output.valid_flags; - /* if separate mux flag is set, mux controls are required */ - if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { - atpx->functions.i2c_mux_cntl = true; - atpx->functions.disp_mux_cntl = true; - } - /* if any outputs are muxed, mux controls are required */ - if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | - ATPX_TV_SIGNAL_MUXED | - ATPX_DFP_SIGNAL_MUXED)) - atpx->functions.disp_mux_cntl = true; kfree(info); } + + /* if separate mux flag is set, mux controls are required */ + if (valid_bits & ATPX_SEPARATE_MUX_FOR_I2C) { + atpx->functions.i2c_mux_cntl = true; + atpx->functions.disp_mux_cntl = true; + } + /* if any outputs are muxed, mux controls are required */ + if (valid_bits & (ATPX_CRT1_RGB_SIGNAL_MUXED | + ATPX_TV_SIGNAL_MUXED | + ATPX_DFP_SIGNAL_MUXED)) + atpx->functions.disp_mux_cntl = true; + + /* some bioses set these bits rather than flagging power_cntl as supported */ + if (valid_bits & (ATPX_DYNAMIC_PX_SUPPORTED | + ATPX_DYNAMIC_DGPU_POWER_OFF_SUPPORTED)) + atpx->functions.power_cntl = true; + + 
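/* start from non-hybrid; flipped below when the MS hybrid-graphics flag is advertised */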
atpx->is_hybrid = false; + if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) { + printk("ATPX Hybrid Graphics\n"); +#if 1 + /* This is a temporary hack until the D3 cold support + * makes it upstream. The ATPX power_control method seems + * to still work even if the system should be using + * the new standardized hybrid D3 cold ACPI interface. + */ + atpx->functions.power_cntl = true; +#else + atpx->functions.power_cntl = false; +#endif + atpx->is_hybrid = true; + } + return 0; } @@ -258,6 +285,10 @@ static int radeon_atpx_set_discrete_state(struct radeon_atpx *atpx, u8 state) if (!info) return -EIO; kfree(info); + + /* 200ms delay is required after off */ + if (state == 0) + msleep(200); } return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index 81a63d7f5cd9..b79f3b002471 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c @@ -2064,7 +2064,6 @@ radeon_add_atom_connector(struct drm_device *dev, RADEON_OUTPUT_CSC_BYPASS); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; @@ -2314,8 +2313,10 @@ } if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { - if (i2c_bus->valid) - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (i2c_bus->valid) { + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } } else connector->polled = DRM_CONNECTOR_POLL_HPD; @@ -2391,7 +2392,6 @@ radeon_add_legacy_connector(struct drm_device *dev, 1); /* no HPD on analog connectors */ radeon_connector->hpd.hpd = RADEON_HPD_NONE; - connector->polled = DRM_CONNECTOR_POLL_CONNECT; connector->interlace_allowed = true; connector->doublescan_allowed = true; break; @@ -2476,10 +2476,13 @@ } if (radeon_connector->hpd.hpd == RADEON_HPD_NONE) { - if (i2c_bus->valid) - connector->polled = DRM_CONNECTOR_POLL_CONNECT; + if (i2c_bus->valid) { + connector->polled = DRM_CONNECTOR_POLL_CONNECT | + DRM_CONNECTOR_POLL_DISCONNECT; + } } else connector->polled = DRM_CONNECTOR_POLL_HPD; + connector->display_info.subpixel_order = subpixel_order; drm_connector_register(connector); } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e721e6b2766e..a00dd2f74527 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -30,6 +30,7 @@ #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/radeon_drm.h> +#include <linux/pm_runtime.h> #include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include <linux/efi.h> @@ -630,6 +631,23 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) /* * GPU helpers function. */ + +/** + * radeon_device_is_virtual - check if we are running in a virtual environment + * + * Check if the asic has been passed through to a VM (all asics). + * Used at driver startup. + * Returns true if virtual or false if not.
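+ * On x86 this keys off boot_cpu_has(X86_FEATURE_HYPERVISOR); other architectures always report false.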
+ */ +static bool radeon_device_is_virtual(void) +{ +#ifdef CONFIG_X86 + return boot_cpu_has(X86_FEATURE_HYPERVISOR); +#else + return false; +#endif +} + /** * radeon_card_posted - check if the hw has already been initialized * @@ -643,6 +661,10 @@ bool radeon_card_posted(struct radeon_device *rdev) { uint32_t reg; + /* for pass through, always force asic_init */ + if (radeon_device_is_virtual()) + return false; + /* required for EFI mode on macbook2,1 which uses an r5xx asic */ if (efi_enabled(EFI_BOOT) && (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) && @@ -1505,6 +1527,9 @@ int radeon_device_init(struct radeon_device *rdev, return 0; failed: + /* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */ + if (radeon_is_px(ddev)) + pm_runtime_put_noidle(ddev->dev); if (runtime) vga_switcheroo_fini_domain_pm_ops(rdev->dev); return r; @@ -1631,7 +1656,7 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend, radeon_agp_suspend(rdev); pci_save_state(dev->pdev); - if (freeze && rdev->family >= CHIP_R600) { + if (freeze && rdev->family >= CHIP_CEDAR) { rdev->asic->asic_reset(rdev, true); pci_restore_state(dev->pdev); } else if (suspend) { diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 3965d1916b9c..5f1cd695c965 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1711,6 +1711,7 @@ void radeon_modeset_fini(struct radeon_device *rdev) radeon_afmt_fini(rdev); drm_kms_helper_poll_fini(rdev->ddev); radeon_hpd_fini(rdev); + drm_crtc_force_disable_all(rdev->ddev); drm_mode_config_cleanup(rdev->ddev); rdev->mode_info.mode_config_initialized = false; } diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index a455dc7d4aa1..c01a7c6abb49 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -93,9 +93,10 @@ * 2.43.0 - RADEON_INFO_GPU_RESET_COUNTER * 2.44.0 - SET_APPEND_CNT packet3 support * 2.45.0 - Allow setting shader registers using DMA/COPY packet3 on SI + * 2.46.0 - Add PFP_SYNC_ME support on evergreen */ #define KMS_DRIVER_MAJOR 2 -#define KMS_DRIVER_MINOR 45 +#define KMS_DRIVER_MINOR 46 #define KMS_DRIVER_PATCHLEVEL 0 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); int radeon_driver_unload_kms(struct drm_device *dev); @@ -162,9 +163,13 @@ void radeon_debugfs_cleanup(struct drm_minor *minor); #if defined(CONFIG_VGA_SWITCHEROO) void radeon_register_atpx_handler(void); void radeon_unregister_atpx_handler(void); +bool radeon_has_atpx_dgpu_power_cntl(void); +bool radeon_is_atpx_hybrid(void); #else static inline void radeon_register_atpx_handler(void) {} static inline void radeon_unregister_atpx_handler(void) {} +static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; } +static inline bool radeon_is_atpx_hybrid(void) { return false; } #endif int radeon_no_wb; @@ -404,7 +409,10 @@ static int radeon_pmops_runtime_suspend(struct device *dev) pci_save_state(pdev); pci_disable_device(pdev); pci_ignore_hotplug(pdev); - pci_set_power_state(pdev, PCI_D3cold); + if (radeon_is_atpx_hybrid()) + pci_set_power_state(pdev, PCI_D3cold); + else if (!radeon_has_atpx_dgpu_power_cntl()) + pci_set_power_state(pdev, PCI_D3hot); drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; return 0; @@ -421,7 +429,9 @@ static int radeon_pmops_runtime_resume(struct device *dev) drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING; - pci_set_power_state(pdev, PCI_D0); + if 
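/* on resume, only hybrid parts and boards without ATPX dGPU power control need an explicit transition back to D0; otherwise ATPX has already powered the card back up */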
(radeon_is_atpx_hybrid() || + !radeon_has_atpx_dgpu_power_cntl()) + pci_set_power_state(pdev, PCI_D0); pci_restore_state(pdev); ret = pci_enable_device(pdev); if (ret) diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 414953c46a38..835563c1f0ed 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -63,7 +63,10 @@ int radeon_driver_unload_kms(struct drm_device *dev) if (rdev->rmmio == NULL) goto done_free; - pm_runtime_get_sync(dev->dev); + if (radeon_is_px(dev)) { + pm_runtime_get_sync(dev->dev); + pm_runtime_forbid(dev->dev); + } radeon_kfd_device_fini(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 590b0377fbe2..ffdad81ef964 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -300,8 +300,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, if (IS_ERR(fence)) return PTR_ERR(fence); - r = ttm_bo_move_accel_cleanup(bo, &fence->base, - evict, no_wait_gpu, new_mem); + r = ttm_bo_move_accel_cleanup(bo, &fence->base, evict, new_mem); radeon_fence_unref(&fence); return r; } @@ -403,6 +402,10 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem = &bo->mem; int r; + r = ttm_bo_wait(bo, interruptible, no_wait_gpu); + if (r) + return r; + /* Can't move a pinned BO */ rbo = container_of(bo, struct radeon_bo, tbo); if (WARN_ON_ONCE(rbo->pin_count > 0)) @@ -441,7 +444,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, if (r) { memcpy: - r = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); + r = ttm_bo_move_memcpy(bo, evict, interruptible, + no_wait_gpu, new_mem); if (r) { return r; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index b30e719dd56d..2523ca96c6c7 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -50,6 +50,7 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin"); MODULE_FIRMWARE("radeon/tahiti_mc.bin"); MODULE_FIRMWARE("radeon/tahiti_rlc.bin"); MODULE_FIRMWARE("radeon/tahiti_smc.bin"); +MODULE_FIRMWARE("radeon/tahiti_k_smc.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin"); MODULE_FIRMWARE("radeon/PITCAIRN_me.bin"); @@ -65,6 +66,7 @@ MODULE_FIRMWARE("radeon/pitcairn_ce.bin"); MODULE_FIRMWARE("radeon/pitcairn_mc.bin"); MODULE_FIRMWARE("radeon/pitcairn_rlc.bin"); MODULE_FIRMWARE("radeon/pitcairn_smc.bin"); +MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin"); MODULE_FIRMWARE("radeon/VERDE_pfp.bin"); MODULE_FIRMWARE("radeon/VERDE_me.bin"); @@ -80,6 +82,7 @@ MODULE_FIRMWARE("radeon/verde_ce.bin"); MODULE_FIRMWARE("radeon/verde_mc.bin"); MODULE_FIRMWARE("radeon/verde_rlc.bin"); MODULE_FIRMWARE("radeon/verde_smc.bin"); +MODULE_FIRMWARE("radeon/verde_k_smc.bin"); MODULE_FIRMWARE("radeon/OLAND_pfp.bin"); MODULE_FIRMWARE("radeon/OLAND_me.bin"); @@ -95,6 +98,7 @@ MODULE_FIRMWARE("radeon/oland_ce.bin"); MODULE_FIRMWARE("radeon/oland_mc.bin"); MODULE_FIRMWARE("radeon/oland_rlc.bin"); MODULE_FIRMWARE("radeon/oland_smc.bin"); +MODULE_FIRMWARE("radeon/oland_k_smc.bin"); MODULE_FIRMWARE("radeon/HAINAN_pfp.bin"); MODULE_FIRMWARE("radeon/HAINAN_me.bin"); @@ -110,6 +114,7 @@ MODULE_FIRMWARE("radeon/hainan_ce.bin"); MODULE_FIRMWARE("radeon/hainan_mc.bin"); MODULE_FIRMWARE("radeon/hainan_rlc.bin"); MODULE_FIRMWARE("radeon/hainan_smc.bin"); +MODULE_FIRMWARE("radeon/hainan_k_smc.bin"); static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh); static void si_pcie_gen3_enable(struct radeon_device *rdev); @@ -1653,12 +1658,16 @@ 
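/*
 * As on the CIK path further up, certain board revisions want the newer
 * banked SMC image. The request logic below reduces to this sketch
 * (request_fw() standing in for request_firmware()):
 *
 *	snprintf(fw_name, sizeof(fw_name),
 *		 new_smc ? "radeon/%s_k_smc.bin" : "radeon/%s_smc.bin",
 *		 new_chip_name);
 *	if (request_fw(fw_name))
 *		request_fw(legacy_name);  // fall back to the uppercase-era image
 */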
static int si_init_microcode(struct radeon_device *rdev) char fw_name[30]; int err; int new_fw = 0; + bool new_smc = false; DRM_DEBUG("\n"); switch (rdev->family) { case CHIP_TAHITI: chip_name = "TAHITI"; + /* XXX: figure out which Tahitis need the new ucode */ + if (0) + new_smc = true; new_chip_name = "tahiti"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1670,6 +1679,13 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_PITCAIRN: chip_name = "PITCAIRN"; + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->device == 0x6810) || + (rdev->pdev->device == 0x6811) || + (rdev->pdev->device == 0x6816) || + (rdev->pdev->device == 0x6817) || + (rdev->pdev->device == 0x6806)) + new_smc = true; new_chip_name = "pitcairn"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1681,6 +1697,16 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_VERDE: chip_name = "VERDE"; + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0x87) || + (rdev->pdev->device == 0x6820) || + (rdev->pdev->device == 0x6821) || + (rdev->pdev->device == 0x6822) || + (rdev->pdev->device == 0x6823) || + (rdev->pdev->device == 0x682A) || + (rdev->pdev->device == 0x682B)) + new_smc = true; new_chip_name = "verde"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1692,6 +1718,13 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_OLAND: chip_name = "OLAND"; + if ((rdev->pdev->revision == 0xC7) || + (rdev->pdev->revision == 0x80) || + (rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->device == 0x6604) || + (rdev->pdev->device == 0x6605)) + new_smc = true; new_chip_name = "oland"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1702,6 +1735,13 @@ static int si_init_microcode(struct radeon_device *rdev) break; case CHIP_HAINAN: chip_name = "HAINAN"; + if ((rdev->pdev->revision == 0x81) || + (rdev->pdev->revision == 0x83) || + (rdev->pdev->revision == 0xC3) || + (rdev->pdev->device == 0x6664) || + (rdev->pdev->device == 0x6665) || + (rdev->pdev->device == 0x6667)) + new_smc = true; new_chip_name = "hainan"; pfp_req_size = SI_PFP_UCODE_SIZE * 4; me_req_size = SI_PM4_UCODE_SIZE * 4; @@ -1847,7 +1887,10 @@ static int si_init_microcode(struct radeon_device *rdev) } } - snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); + if (new_smc) + snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name); + else + snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); if (err) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c index 0d8bdda736f9..e39fcef2e033 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c @@ -552,7 +552,7 @@ static irqreturn_t rcar_du_crtc_irq(int irq, void *arg) rcar_du_crtc_write(rcrtc, DSRCR, status & DSRCR_MASK); if (status & DSSR_FRM) { - drm_handle_vblank(rcrtc->crtc.dev, rcrtc->index); + drm_crtc_handle_vblank(&rcrtc->crtc); rcar_du_crtc_finish_page_flip(rcrtc); ret = IRQ_HANDLED; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index d665fb04d264..f0bd1ee8b128 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ 
b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -433,6 +433,7 @@ static int rockchip_drm_platform_probe(struct platform_device *pdev) is_support_iommu = false; } + of_node_put(iommu); component_match_add(dev, &match, compare_of, port->parent); of_node_put(port); } diff --git a/drivers/gpu/drm/sti/Kconfig b/drivers/gpu/drm/sti/Kconfig index 5ad43a1bb260..494ab257f77c 100644 --- a/drivers/gpu/drm/sti/Kconfig +++ b/drivers/gpu/drm/sti/Kconfig @@ -7,5 +7,6 @@ config DRM_STI select DRM_KMS_CMA_HELPER select DRM_PANEL select FW_LOADER + select SND_SOC_HDMI_CODEC if SND_SOC help Choose this option to enable DRM on STM stiH41x chipset diff --git a/drivers/gpu/drm/sti/sti_awg_utils.c b/drivers/gpu/drm/sti/sti_awg_utils.c index a516eb869f6f..2da7d6866d5d 100644 --- a/drivers/gpu/drm/sti/sti_awg_utils.c +++ b/drivers/gpu/drm/sti/sti_awg_utils.c @@ -6,6 +6,8 @@ #include "sti_awg_utils.h" +#define AWG_DELAY (-5) + #define AWG_OPCODE_OFFSET 10 #define AWG_MAX_ARG 0x3ff @@ -125,7 +127,7 @@ static int awg_generate_line_signal( val = timing->blanking_level; ret |= awg_generate_instr(RPLSET, val, 0, 0, fwparams); - val = timing->trailing_pixels - 1; + val = timing->trailing_pixels - 1 + AWG_DELAY; ret |= awg_generate_instr(SKIP, val, 0, 0, fwparams); } diff --git a/drivers/gpu/drm/sti/sti_compositor.c b/drivers/gpu/drm/sti/sti_compositor.c index 794148ff0e57..bd74732ea09b 100644 --- a/drivers/gpu/drm/sti/sti_compositor.c +++ b/drivers/gpu/drm/sti/sti_compositor.c @@ -267,10 +267,12 @@ static int sti_compositor_probe(struct platform_device *pdev) vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0); if (vtg_np) compo->vtg_main = of_vtg_find(vtg_np); + of_node_put(vtg_np); vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 1); if (vtg_np) compo->vtg_aux = of_vtg_find(vtg_np); + of_node_put(vtg_np); platform_set_drvdata(pdev, compo); diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c index 7fab3af7473b..c7d734dc3cf4 100644 --- a/drivers/gpu/drm/sti/sti_crtc.c +++ b/drivers/gpu/drm/sti/sti_crtc.c @@ -23,22 +23,11 @@ static void sti_crtc_enable(struct drm_crtc *crtc) { struct sti_mixer *mixer = to_sti_mixer(crtc); - struct device *dev = mixer->dev; - struct sti_compositor *compo = dev_get_drvdata(dev); DRM_DEBUG_DRIVER("\n"); mixer->status = STI_MIXER_READY; - /* Prepare and enable the compo IP clock */ - if (mixer->id == STI_MIXER_MAIN) { - if (clk_prepare_enable(compo->clk_compo_main)) - DRM_INFO("Failed to prepare/enable compo_main clk\n"); - } else { - if (clk_prepare_enable(compo->clk_compo_aux)) - DRM_INFO("Failed to prepare/enable compo_aux clk\n"); - } - drm_crtc_vblank_on(crtc); } @@ -57,9 +46,8 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) struct sti_mixer *mixer = to_sti_mixer(crtc); struct device *dev = mixer->dev; struct sti_compositor *compo = dev_get_drvdata(dev); - struct clk *clk; + struct clk *compo_clk, *pix_clk; int rate = mode->clock * 1000; - int res; DRM_DEBUG_KMS("CRTC:%d (%s) mode:%d (%s)\n", crtc->base.id, sti_mixer_to_str(mixer), @@ -74,32 +62,46 @@ sti_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode) mode->vsync_start, mode->vsync_end, mode->vtotal, mode->type, mode->flags); - /* Set rate and prepare/enable pixel clock */ - if (mixer->id == STI_MIXER_MAIN) - clk = compo->clk_pix_main; - else - clk = compo->clk_pix_aux; + if (mixer->id == STI_MIXER_MAIN) { + compo_clk = compo->clk_compo_main; + pix_clk = compo->clk_pix_main; + } else { + compo_clk = compo->clk_compo_aux; + pix_clk = compo->clk_pix_aux; 
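/*
 * From here on the function uses the usual goto unwind ladder:
 * compo_clk is enabled first, then pix_clk is rated and enabled, then
 * the mixer video area is activated; a failure at any stage jumps to a
 * label that disables exactly the clocks already enabled
 * (mixer_error -> pix_clk, pix_error -> compo_clk) before returning
 * -EINVAL.
 */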
+ } + + /* Prepare and enable the compo IP clock */ + if (clk_prepare_enable(compo_clk)) { + DRM_INFO("Failed to prepare/enable compositor clk\n"); + goto compo_error; + } - res = clk_set_rate(clk, rate); - if (res < 0) { + /* Set rate and prepare/enable pixel clock */ + if (clk_set_rate(pix_clk, rate) < 0) { DRM_ERROR("Cannot set rate (%dHz) for pix clk\n", rate); - return -EINVAL; + goto pix_error; } - if (clk_prepare_enable(clk)) { + if (clk_prepare_enable(pix_clk)) { DRM_ERROR("Failed to prepare/enable pix clk\n"); - return -EINVAL; + goto pix_error; } sti_vtg_set_config(mixer->id == STI_MIXER_MAIN ? compo->vtg_main : compo->vtg_aux, &crtc->mode); - res = sti_mixer_active_video_area(mixer, &crtc->mode); - if (res) { + if (sti_mixer_active_video_area(mixer, &crtc->mode)) { DRM_ERROR("Can't set active video area\n"); - return -EINVAL; + goto mixer_error; } - return res; + return 0; + +mixer_error: + clk_disable_unprepare(pix_clk); +pix_error: + clk_disable_unprepare(compo_clk); +compo_error: + return -EINVAL; } static void sti_crtc_disable(struct drm_crtc *crtc) @@ -130,7 +132,6 @@ static void sti_crtc_disable(struct drm_crtc *crtc) static void sti_crtc_mode_set_nofb(struct drm_crtc *crtc) { - sti_crtc_enable(crtc); sti_crtc_mode_set(crtc, &crtc->state->adjusted_mode); } @@ -221,9 +222,7 @@ static void sti_crtc_atomic_flush(struct drm_crtc *crtc, static const struct drm_crtc_helper_funcs sti_crtc_helper_funcs = { .enable = sti_crtc_enable, .disable = sti_crtc_disabling, - .mode_set = drm_helper_crtc_mode_set, .mode_set_nofb = sti_crtc_mode_set_nofb, - .mode_set_base = drm_helper_crtc_mode_set_base, .atomic_begin = sti_crtc_atomic_begin, .atomic_flush = sti_crtc_atomic_flush, }; diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c index ec3108074350..00881eb4536e 100644 --- a/drivers/gpu/drm/sti/sti_dvo.c +++ b/drivers/gpu/drm/sti/sti_dvo.c @@ -580,6 +580,7 @@ static int sti_dvo_probe(struct platform_device *pdev) dvo->panel_node = of_parse_phandle(np, "sti,panel", 0); if (!dvo->panel_node) DRM_ERROR("No panel associated to the dvo output\n"); + of_node_put(dvo->panel_node); platform_set_drvdata(pdev, dvo); diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c index 8d1402b245bf..fedc17f98d9b 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.c +++ b/drivers/gpu/drm/sti/sti_hdmi.c @@ -18,6 +18,8 @@ #include <drm/drm_crtc_helper.h> #include <drm/drm_edid.h> +#include <sound/hdmi-codec.h> + #include "sti_hdmi.h" #include "sti_hdmi_tx3g4c28phy.h" #include "sti_hdmi_tx3g0c55phy.h" @@ -35,6 +37,8 @@ #define HDMI_DFLT_CHL0_DAT 0x0110 #define HDMI_DFLT_CHL1_DAT 0x0114 #define HDMI_DFLT_CHL2_DAT 0x0118 +#define HDMI_AUDIO_CFG 0x0200 +#define HDMI_SPDIF_FIFO_STATUS 0x0204 #define HDMI_SW_DI_1_HEAD_WORD 0x0210 #define HDMI_SW_DI_1_PKT_WORD0 0x0214 #define HDMI_SW_DI_1_PKT_WORD1 0x0218 @@ -44,6 +48,9 @@ #define HDMI_SW_DI_1_PKT_WORD5 0x0228 #define HDMI_SW_DI_1_PKT_WORD6 0x022C #define HDMI_SW_DI_CFG 0x0230 +#define HDMI_SAMPLE_FLAT_MASK 0x0244 +#define HDMI_AUDN 0x0400 +#define HDMI_AUD_CTS 0x0404 #define HDMI_SW_DI_2_HEAD_WORD 0x0600 #define HDMI_SW_DI_2_PKT_WORD0 0x0604 #define HDMI_SW_DI_2_PKT_WORD1 0x0608 @@ -103,6 +110,7 @@ #define HDMI_INT_DLL_LCK BIT(5) #define HDMI_INT_NEW_FRAME BIT(6) #define HDMI_INT_GENCTRL_PKT BIT(7) +#define HDMI_INT_AUDIO_FIFO_XRUN BIT(8) #define HDMI_INT_SINK_TERM_PRESENT BIT(11) #define HDMI_DEFAULT_INT (HDMI_INT_SINK_TERM_PRESENT \ @@ -111,6 +119,7 @@ | HDMI_INT_GLOBAL) #define HDMI_WORKING_INT (HDMI_INT_SINK_TERM_PRESENT \ + | 
HDMI_INT_AUDIO_FIFO_XRUN \ | HDMI_INT_GENCTRL_PKT \ | HDMI_INT_NEW_FRAME \ | HDMI_INT_DLL_LCK \ @@ -121,6 +130,27 @@ #define HDMI_STA_SW_RST BIT(1) +#define HDMI_AUD_CFG_8CH BIT(0) +#define HDMI_AUD_CFG_SPDIF_DIV_2 BIT(1) +#define HDMI_AUD_CFG_SPDIF_DIV_3 BIT(2) +#define HDMI_AUD_CFG_SPDIF_CLK_DIV_4 (BIT(1) | BIT(2)) +#define HDMI_AUD_CFG_CTS_CLK_256FS BIT(12) +#define HDMI_AUD_CFG_DTS_INVALID BIT(16) +#define HDMI_AUD_CFG_ONE_BIT_INVALID (BIT(18) | BIT(19) | BIT(20) | BIT(21)) +#define HDMI_AUD_CFG_CH12_VALID BIT(28) +#define HDMI_AUD_CFG_CH34_VALID BIT(29) +#define HDMI_AUD_CFG_CH56_VALID BIT(30) +#define HDMI_AUD_CFG_CH78_VALID BIT(31) + +/* sample flat mask */ +#define HDMI_SAMPLE_FLAT_NO 0 +#define HDMI_SAMPLE_FLAT_SP0 BIT(0) +#define HDMI_SAMPLE_FLAT_SP1 BIT(1) +#define HDMI_SAMPLE_FLAT_SP2 BIT(2) +#define HDMI_SAMPLE_FLAT_SP3 BIT(3) +#define HDMI_SAMPLE_FLAT_ALL (HDMI_SAMPLE_FLAT_SP0 | HDMI_SAMPLE_FLAT_SP1 |\ + HDMI_SAMPLE_FLAT_SP2 | HDMI_SAMPLE_FLAT_SP3) + #define HDMI_INFOFRAME_HEADER_TYPE(x) (((x) & 0xff) << 0) #define HDMI_INFOFRAME_HEADER_VERSION(x) (((x) & 0xff) << 8) #define HDMI_INFOFRAME_HEADER_LEN(x) (((x) & 0x0f) << 16) @@ -171,6 +201,10 @@ static irqreturn_t hdmi_irq_thread(int irq, void *arg) wake_up_interruptible(&hdmi->wait_event); } + /* Audio FIFO underrun IRQ */ + if (hdmi->irq_status & HDMI_INT_AUDIO_FIFO_XRUN) + DRM_INFO("Warning: audio FIFO underrun occurred!"); + return IRQ_HANDLED; } @@ -441,26 +475,29 @@ static int hdmi_avi_infoframe_config(struct sti_hdmi *hdmi) */ static int hdmi_audio_infoframe_config(struct sti_hdmi *hdmi) { - struct hdmi_audio_infoframe infofame; + struct hdmi_audio_params *audio = &hdmi->audio; u8 buffer[HDMI_INFOFRAME_SIZE(AUDIO)]; - int ret; - - ret = hdmi_audio_infoframe_init(&infofame); - if (ret < 0) { - DRM_ERROR("failed to setup audio infoframe: %d\n", ret); - return ret; - } - - infofame.channels = 2; - - ret = hdmi_audio_infoframe_pack(&infofame, buffer, sizeof(buffer)); - if (ret < 0) { - DRM_ERROR("failed to pack audio infoframe: %d\n", ret); - return ret; + int ret, val; + + DRM_DEBUG_DRIVER("enter %s, AIF %s\n", __func__, + audio->enabled ? "enable" : "disable"); + if (audio->enabled) { + /* set the stored audio parameters */ + ret = hdmi_audio_infoframe_pack(&audio->cea, buffer, + sizeof(buffer)); + if (ret < 0) { + DRM_ERROR("failed to pack audio infoframe: %d\n", ret); + return ret; + } + hdmi_infoframe_write_infopack(hdmi, buffer, ret); + } else { + /* disable audio infoframe transmission */ + val = hdmi_read(hdmi, HDMI_SW_DI_CFG); + val &= ~HDMI_IFRAME_CFG_DI_N(HDMI_IFRAME_MASK, + HDMI_IFRAME_SLOT_AUDIO); + hdmi_write(hdmi, val, HDMI_SW_DI_CFG); } - hdmi_infoframe_write_infopack(hdmi, buffer, ret); - return 0; } @@ -650,6 +687,10 @@ static int hdmi_dbg_show(struct seq_file *s, void *data) DBGFS_DUMP("", HDMI_SW_DI_CFG); hdmi_dbg_sw_di_cfg(s, hdmi_read(hdmi, HDMI_SW_DI_CFG)); + DBGFS_DUMP("\n", HDMI_AUDIO_CFG); + DBGFS_DUMP("\n", HDMI_SPDIF_FIFO_STATUS); + DBGFS_DUMP("\n", HDMI_AUDN); + seq_printf(s, "\n AVI Infoframe (Data Island slot N=%d):", HDMI_IFRAME_SLOT_AVI); DBGFS_DUMP_DI(HDMI_SW_DI_N_HEAD_WORD, HDMI_IFRAME_SLOT_AVI); @@ -854,6 +895,7 @@ static int sti_hdmi_connector_get_modes(struct drm_connector *connector) count = drm_add_edid_modes(connector, edid); drm_mode_connector_update_edid_property(connector, edid); + drm_edid_to_eld(connector, edid); kfree(edid); return count; @@ -1036,6 +1078,207 @@ static struct drm_encoder *sti_hdmi_find_encoder(struct drm_device *dev) return NULL; } +/** + * sti_hdmi_audio_get_non_coherent_n() - get N parameter for non-coherent + * clocks. Non-coherent clocks mean that the audio and TMDS clocks do not share + * the same source (the clocks drift relative to each other). In this case the + * assumption is that CTS is calculated automatically by the hardware. + * + * @audio_fs: audio frame clock frequency in Hz + * + * Values computed are based on the table described in the HDMI 1.4b + * specification. + * + * Returns the computed n value; for example 44.1 kHz maps to n = 6272 and + * 48 kHz to n = 6144, while rates outside the table fall back to + * n = 128 * fs / 1000.
+ */ +static int sti_hdmi_audio_get_non_coherent_n(unsigned int audio_fs) +{ + unsigned int n; + + switch (audio_fs) { + case 32000: + n = 4096; + break; + case 44100: + n = 6272; + break; + case 48000: + n = 6144; + break; + case 88200: + n = 6272 * 2; + break; + case 96000: + n = 6144 * 2; + break; + case 176400: + n = 6272 * 4; + break; + case 192000: + n = 6144 * 4; + break; + default: + /* Not pre-defined, recommended value: 128 * fs / 1000 */ + n = (audio_fs * 128) / 1000; + } + + return n; +} + +static int hdmi_audio_configure(struct sti_hdmi *hdmi, + struct hdmi_audio_params *params) +{ + int audio_cfg, n; + struct hdmi_audio_infoframe *info = ¶ms->cea; + + DRM_DEBUG_DRIVER("\n"); + + if (!hdmi->enabled) + return 0; + + /* update N parameter */ + n = sti_hdmi_audio_get_non_coherent_n(params->sample_rate); + + DRM_DEBUG_DRIVER("Audio rate = %d Hz, TMDS clock = %d Hz, n = %d\n", + params->sample_rate, hdmi->mode.clock * 1000, n); + hdmi_write(hdmi, n, HDMI_AUDN); + + /* update HDMI registers according to configuration */ + audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID | + HDMI_AUD_CFG_ONE_BIT_INVALID; + + switch (info->channels) { + case 8: + audio_cfg |= HDMI_AUD_CFG_CH78_VALID; + case 6: + audio_cfg |= HDMI_AUD_CFG_CH56_VALID; + case 4: + audio_cfg |= HDMI_AUD_CFG_CH34_VALID | HDMI_AUD_CFG_8CH; + case 2: + audio_cfg |= HDMI_AUD_CFG_CH12_VALID; + break; + default: + DRM_ERROR("ERROR: Unsupported number of channels (%d)!\n", + info->channels); + return -EINVAL; + } + + hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG); + + hdmi->audio = *params; + + return hdmi_audio_infoframe_config(hdmi); +} + +static void hdmi_audio_shutdown(struct device *dev, void *data) +{ + struct sti_hdmi *hdmi = dev_get_drvdata(dev); + int audio_cfg; + + DRM_DEBUG_DRIVER("\n"); + + /* disable audio */ + audio_cfg = HDMI_AUD_CFG_SPDIF_DIV_2 | HDMI_AUD_CFG_DTS_INVALID | + HDMI_AUD_CFG_ONE_BIT_INVALID; + hdmi_write(hdmi, audio_cfg, HDMI_AUDIO_CFG); + + hdmi->audio.enabled = 0; + hdmi_audio_infoframe_config(hdmi); +} + +static int hdmi_audio_hw_params(struct device *dev, + void *data, + struct hdmi_codec_daifmt *daifmt, + struct hdmi_codec_params *params) +{ + struct sti_hdmi *hdmi = dev_get_drvdata(dev); + int ret; + struct hdmi_audio_params audio = { + .sample_width = params->sample_width, + .sample_rate = params->sample_rate, + .cea = params->cea, + }; + + DRM_DEBUG_DRIVER("\n"); + + if (!hdmi->enabled) + return 0; + + if ((daifmt->fmt != HDMI_I2S) || daifmt->bit_clk_inv || + daifmt->frame_clk_inv || daifmt->bit_clk_master || + daifmt->frame_clk_master) { + dev_err(dev, "%s: Bad flags %d %d %d %d\n", __func__, + daifmt->bit_clk_inv, daifmt->frame_clk_inv, + daifmt->bit_clk_master, + daifmt->frame_clk_master); + return -EINVAL; + } + + audio.enabled = 1; + + ret = hdmi_audio_configure(hdmi, &audio); + if (ret < 0) + return ret; + + return 0; +} + +static int hdmi_audio_digital_mute(struct device *dev, void *data, bool enable) +{ + struct sti_hdmi *hdmi = dev_get_drvdata(dev); + + DRM_DEBUG_DRIVER("%s\n", enable ? 
"enable" : "disable"); + + if (enable) + hdmi_write(hdmi, HDMI_SAMPLE_FLAT_ALL, HDMI_SAMPLE_FLAT_MASK); + else + hdmi_write(hdmi, HDMI_SAMPLE_FLAT_NO, HDMI_SAMPLE_FLAT_MASK); + + return 0; +} + +static int hdmi_audio_get_eld(struct device *dev, void *data, uint8_t *buf, size_t len) +{ + struct sti_hdmi *hdmi = dev_get_drvdata(dev); + struct drm_connector *connector = hdmi->drm_connector; + + DRM_DEBUG_DRIVER("\n"); + memcpy(buf, connector->eld, min(sizeof(connector->eld), len)); + + return 0; +} + +static const struct hdmi_codec_ops audio_codec_ops = { + .hw_params = hdmi_audio_hw_params, + .audio_shutdown = hdmi_audio_shutdown, + .digital_mute = hdmi_audio_digital_mute, + .get_eld = hdmi_audio_get_eld, +}; + +static int sti_hdmi_register_audio_driver(struct device *dev, + struct sti_hdmi *hdmi) +{ + struct hdmi_codec_pdata codec_data = { + .ops = &audio_codec_ops, + .max_i2s_channels = 8, + .i2s = 1, + }; + + DRM_DEBUG_DRIVER("\n"); + + hdmi->audio.enabled = 0; + + hdmi->audio_pdev = platform_device_register_data( + dev, HDMI_CODEC_DRV_NAME, PLATFORM_DEVID_AUTO, + &codec_data, sizeof(codec_data)); + + if (IS_ERR(hdmi->audio_pdev)) + return PTR_ERR(hdmi->audio_pdev); + + DRM_INFO("%s Driver bound %s\n", HDMI_CODEC_DRV_NAME, dev_name(dev)); + + return 0; +} + static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) { struct sti_hdmi *hdmi = dev_get_drvdata(dev); @@ -1082,12 +1325,27 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) /* initialise property */ sti_hdmi_connector_init_property(drm_dev, drm_connector); + hdmi->drm_connector = drm_connector; + err = drm_mode_connector_attach_encoder(drm_connector, encoder); if (err) { DRM_ERROR("Failed to attach a connector to a encoder\n"); goto err_sysfs; } + err = sti_hdmi_register_audio_driver(dev, hdmi); + if (err) { + DRM_ERROR("Failed to attach an audio codec\n"); + goto err_sysfs; + } + + /* Initialize audio infoframe */ + err = hdmi_audio_infoframe_init(&hdmi->audio.cea); + if (err) { + DRM_ERROR("Failed to init audio infoframe\n"); + goto err_sysfs; + } + /* Enable default interrupts */ hdmi_write(hdmi, HDMI_DEFAULT_INT, HDMI_INT_EN); @@ -1095,6 +1353,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data) err_sysfs: drm_bridge_remove(bridge); + hdmi->drm_connector = NULL; return -EINVAL; } @@ -1244,6 +1503,8 @@ static int sti_hdmi_remove(struct platform_device *pdev) struct sti_hdmi *hdmi = dev_get_drvdata(&pdev->dev); i2c_put_adapter(hdmi->ddc_adapt); + if (hdmi->audio_pdev) + platform_device_unregister(hdmi->audio_pdev); component_del(&pdev->dev, &sti_hdmi_ops); return 0; diff --git a/drivers/gpu/drm/sti/sti_hdmi.h b/drivers/gpu/drm/sti/sti_hdmi.h index ef3a94583bbd..119bc3582ac7 100644 --- a/drivers/gpu/drm/sti/sti_hdmi.h +++ b/drivers/gpu/drm/sti/sti_hdmi.h @@ -23,6 +23,13 @@ struct hdmi_phy_ops { void (*stop)(struct sti_hdmi *hdmi); }; +struct hdmi_audio_params { + bool enabled; + unsigned int sample_width; + unsigned int sample_rate; + struct hdmi_audio_infoframe cea; +}; + /* values for the framing mode property */ enum sti_hdmi_modes { HDMI_MODE_HDMI, @@ -67,6 +74,9 @@ static const struct drm_prop_enum_list colorspace_mode_names[] = { * @ddc_adapt: i2c ddc adapter * @colorspace: current colorspace selected * @hdmi_mode: select framing for HDMI or DVI + * @audio_pdev: ASoC hdmi-codec platform device + * @audio: hdmi audio parameters. 
+ * @drm_connector: hdmi connector */ struct sti_hdmi { struct device dev; @@ -89,6 +99,9 @@ struct sti_hdmi { struct i2c_adapter *ddc_adapt; enum hdmi_colorspace colorspace; enum sti_hdmi_modes hdmi_mode; + struct platform_device *audio_pdev; + struct hdmi_audio_params audio; + struct drm_connector *drm_connector; }; u32 hdmi_read(struct sti_hdmi *hdmi, int offset); diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 33d2f42550cc..b03232247966 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -1363,6 +1363,7 @@ static int sti_hqvdp_probe(struct platform_device *pdev) vtg_np = of_parse_phandle(pdev->dev.of_node, "st,vtg", 0); if (vtg_np) hqvdp->vtg = of_vtg_find(vtg_np); + of_node_put(vtg_np); platform_set_drvdata(pdev, hqvdp); diff --git a/drivers/gpu/drm/sti/sti_plane.c b/drivers/gpu/drm/sti/sti_plane.c index 85cee9098439..0cf3335ef37c 100644 --- a/drivers/gpu/drm/sti/sti_plane.c +++ b/drivers/gpu/drm/sti/sti_plane.c @@ -45,25 +45,15 @@ const char *sti_plane_to_str(struct sti_plane *plane) #define STI_FPS_INTERVAL_MS 3000 -static int sti_plane_timespec_ms_diff(struct timespec lhs, struct timespec rhs) -{ - struct timespec tmp_ts = timespec_sub(lhs, rhs); - u64 tmp_ns = (u64)timespec_to_ns(&tmp_ts); - - do_div(tmp_ns, NSEC_PER_MSEC); - - return (u32)tmp_ns; -} - void sti_plane_update_fps(struct sti_plane *plane, bool new_frame, bool new_field) { - struct timespec now; + ktime_t now; struct sti_fps_info *fps; int fpks, fipks, ms_since_last, num_frames, num_fields; - getrawmonotonic(&now); + now = ktime_get(); /* Compute number of frame updates */ fps = &plane->fps_info; @@ -76,7 +66,7 @@ void sti_plane_update_fps(struct sti_plane *plane, return; fps->curr_frame_counter++; - ms_since_last = sti_plane_timespec_ms_diff(now, fps->last_timestamp); + ms_since_last = ktime_to_ms(ktime_sub(now, fps->last_timestamp)); num_frames = fps->curr_frame_counter - fps->last_frame_counter; if (num_frames <= 0 || ms_since_last < STI_FPS_INTERVAL_MS) diff --git a/drivers/gpu/drm/sti/sti_plane.h b/drivers/gpu/drm/sti/sti_plane.h index 39d39f5b7dd9..e0ea1dd3bb88 100644 --- a/drivers/gpu/drm/sti/sti_plane.h +++ b/drivers/gpu/drm/sti/sti_plane.h @@ -55,7 +55,7 @@ struct sti_fps_info { unsigned int last_frame_counter; unsigned int curr_field_counter; unsigned int last_field_counter; - struct timespec last_timestamp; + ktime_t last_timestamp; char fps_str[FPS_LENGTH]; char fips_str[FPS_LENGTH]; }; diff --git a/drivers/gpu/drm/sti/sti_vtg.c b/drivers/gpu/drm/sti/sti_vtg.c index 6bf4ce466d20..0bdc385eec17 100644 --- a/drivers/gpu/drm/sti/sti_vtg.c +++ b/drivers/gpu/drm/sti/sti_vtg.c @@ -65,7 +65,7 @@ #define HDMI_DELAY (5) /* Delay introduced by the DVO in nb of pixel */ -#define DVO_DELAY (2) +#define DVO_DELAY (7) /* delay introduced by the Arbitrary Waveform Generator in nb of pixels */ #define AWG_DELAY_HD (-9) @@ -432,6 +432,7 @@ static int vtg_probe(struct platform_device *pdev) np = of_parse_phandle(pdev->dev.of_node, "st,slave", 0); if (np) { vtg->slave = of_vtg_find(np); + of_node_put(np); if (!vtg->slave) return -EPROBE_DEFER; diff --git a/drivers/gpu/drm/sun4i/Kconfig b/drivers/gpu/drm/sun4i/Kconfig index 99510e64e91a..a4b357db8856 100644 --- a/drivers/gpu/drm/sun4i/Kconfig +++ b/drivers/gpu/drm/sun4i/Kconfig @@ -1,6 +1,6 @@ config DRM_SUN4I tristate "DRM Support for Allwinner A10 Display Engine" - depends on DRM && ARM + depends on DRM && ARM && COMMON_CLK depends on ARCH_SUNXI || COMPILE_TEST select DRM_GEM_CMA_HELPER select 
DRM_KMS_HELPER diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c index f7a15c1a93bf..3ab560450a82 100644 --- a/drivers/gpu/drm/sun4i/sun4i_backend.c +++ b/drivers/gpu/drm/sun4i/sun4i_backend.c @@ -190,7 +190,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend, /* Get the physical address of the buffer in memory */ gem = drm_fb_cma_get_gem_obj(fb, 0); - DRM_DEBUG_DRIVER("Using GEM @ 0x%x\n", gem->paddr); + DRM_DEBUG_DRIVER("Using GEM @ %pad\n", &gem->paddr); /* Compute the start of the displayed memory */ bpp = drm_format_plane_cpp(fb->pixel_format, 0); @@ -198,7 +198,7 @@ int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend, paddr += (state->src_x >> 16) * bpp; paddr += (state->src_y >> 16) * fb->pitches[0]; - DRM_DEBUG_DRIVER("Setting buffer address to 0x%x\n", paddr); + DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr); /* Write the 32 lower bits of the address (in bits) */ lo_paddr = paddr << 3; diff --git a/drivers/gpu/drm/sun4i/sun4i_dotclock.c b/drivers/gpu/drm/sun4i/sun4i_dotclock.c index 3ff668cb463c..5b3463197c48 100644 --- a/drivers/gpu/drm/sun4i/sun4i_dotclock.c +++ b/drivers/gpu/drm/sun4i/sun4i_dotclock.c @@ -72,14 +72,40 @@ static unsigned long sun4i_dclk_recalc_rate(struct clk_hw *hw, static long sun4i_dclk_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { - return *parent_rate / DIV_ROUND_CLOSEST(*parent_rate, rate); + unsigned long best_parent = 0; + u8 best_div = 1; + int i; + + for (i = 6; i < 127; i++) { + unsigned long ideal = rate * i; + unsigned long rounded; + + rounded = clk_hw_round_rate(clk_hw_get_parent(hw), + ideal); + + if (rounded == ideal) { + best_parent = rounded; + best_div = i; + goto out; + } + + if ((rounded < ideal) && (rounded > best_parent)) { + best_parent = rounded; + best_div = i; + } + } + +out: + *parent_rate = best_parent; + + return best_parent / best_div; } static int sun4i_dclk_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct sun4i_dclk *dclk = hw_to_dclk(hw); - int div = DIV_ROUND_CLOSEST(parent_rate, rate); + u8 div = parent_rate / rate; return regmap_update_bits(dclk->regmap, SUN4I_TCON0_DCLK_REG, GENMASK(6, 0), div); @@ -127,10 +153,14 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon) const char *clk_name, *parent_name; struct clk_init_data init; struct sun4i_dclk *dclk; + int ret; parent_name = __clk_get_name(tcon->sclk0); - of_property_read_string_index(dev->of_node, "clock-output-names", 0, - &clk_name); + ret = of_property_read_string_index(dev->of_node, + "clock-output-names", 0, + &clk_name); + if (ret) + return ret; dclk = devm_kzalloc(dev, sizeof(*dclk), GFP_KERNEL); if (!dclk) @@ -140,6 +170,7 @@ int sun4i_dclk_create(struct device *dev, struct sun4i_tcon *tcon) init.ops = &sun4i_dclk_ops; init.parent_names = &parent_name; init.num_parents = 1; + init.flags = CLK_SET_RATE_PARENT; dclk->regmap = tcon->regs; dclk->hw.init = &init; diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 9a67f927a53e..5b89940edcb1 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -97,6 +97,22 @@ static struct drm_driver sun4i_drv_driver = { .disable_vblank = sun4i_drv_disable_vblank, }; +static void sun4i_remove_framebuffers(void) +{ + struct apertures_struct *ap; + + ap = alloc_apertures(1); + if (!ap) + return; + + /* The framebuffer can be located anywhere in RAM */ + ap->ranges[0].base = 0; + 
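
Aside: the sun4i_dclk_round_rate() hunk above replaces a single DIV_ROUND_CLOSEST() with a search over every legal divider, asking the parent clock what it can actually produce. A simplified standalone sketch of that search (best_dotclock and parent_round are hypothetical names; the real code additionally writes the chosen parent rate back through *parent_rate):

/* Scan dividers 6..126, mirroring the range used above; an exact parent
 * rate wins immediately, otherwise keep the best rate from below. */
static unsigned long best_dotclock(unsigned long target,
				   unsigned long (*parent_round)(unsigned long))
{
	unsigned long best_parent = 0;
	unsigned int best_div = 1;
	unsigned int div;

	for (div = 6; div < 127; div++) {
		unsigned long ideal = target * div;
		unsigned long rounded = parent_round(ideal);

		if (rounded == ideal)
			return rounded / div;	/* exact match, done */

		if (rounded < ideal && rounded > best_parent) {
			best_parent = rounded;	/* closest undershoot yet */
			best_div = div;
		}
	}

	return best_parent / best_div;
}
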
ap->ranges[0].size = ~0; + + remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false); + kfree(ap); +} + static int sun4i_drv_bind(struct device *dev) { struct drm_device *drm; @@ -140,6 +156,9 @@ static int sun4i_drv_bind(struct device *dev) } drm->irq_enabled = true; + /* Remove early framebuffers (ie. simplefb) */ + sun4i_remove_framebuffers(); + /* Create our framebuffer */ drv->fbdev = sun4i_framebuffer_init(drm); if (IS_ERR(drv->fbdev)) { @@ -166,6 +185,7 @@ static void sun4i_drv_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); + drm_connector_unregister_all(drm); drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); sun4i_framebuffer_free(drm); diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c index 442cfe271688..f5bbac6efb4c 100644 --- a/drivers/gpu/drm/sun4i/sun4i_rgb.c +++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c @@ -54,8 +54,13 @@ static int sun4i_rgb_get_modes(struct drm_connector *connector) static int sun4i_rgb_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { + struct sun4i_rgb *rgb = drm_connector_to_sun4i_rgb(connector); + struct sun4i_drv *drv = rgb->drv; + struct sun4i_tcon *tcon = drv->tcon; u32 hsync = mode->hsync_end - mode->hsync_start; u32 vsync = mode->vsync_end - mode->vsync_start; + unsigned long rate = mode->clock * 1000; + long rounded_rate; DRM_DEBUG_DRIVER("Validating modes...\n"); @@ -87,6 +92,15 @@ static int sun4i_rgb_mode_valid(struct drm_connector *connector, DRM_DEBUG_DRIVER("Vertical parameters OK\n"); + rounded_rate = clk_round_rate(tcon->dclk, rate); + if (rounded_rate < rate) + return MODE_CLOCK_LOW; + + if (rounded_rate > rate) + return MODE_CLOCK_HIGH; + + DRM_DEBUG_DRIVER("Clock rate OK\n"); + return MODE_OK; } @@ -193,7 +207,7 @@ int sun4i_rgb_init(struct drm_device *drm) int ret; /* If we don't have a panel, there's no point in going on */ - if (!tcon->panel) + if (IS_ERR(tcon->panel)) return -ENODEV; rgb = devm_kzalloc(drm->dev, sizeof(*rgb), GFP_KERNEL); diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index 9f19b0e08560..652385f09735 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -425,11 +425,11 @@ static struct drm_panel *sun4i_tcon_find_panel(struct device_node *node) remote = of_graph_get_remote_port_parent(end_node); if (!remote) { - DRM_DEBUG_DRIVER("Enable to parse remote node\n"); + DRM_DEBUG_DRIVER("Unable to parse remote node\n"); return ERR_PTR(-EINVAL); } - return of_drm_find_panel(remote); + return of_drm_find_panel(remote) ?: ERR_PTR(-EPROBE_DEFER); } static int sun4i_tcon_bind(struct device *dev, struct device *master, @@ -490,7 +490,11 @@ static int sun4i_tcon_bind(struct device *dev, struct device *master, return 0; } - return sun4i_rgb_init(drm); + ret = sun4i_rgb_init(drm); + if (ret < 0) + goto err_free_clocks; + + return 0; err_free_clocks: sun4i_tcon_free_clocks(tcon); @@ -522,12 +526,13 @@ static int sun4i_tcon_probe(struct platform_device *pdev) * Defer the probe. */ panel = sun4i_tcon_find_panel(node); - if (IS_ERR(panel)) { - /* - * If we don't have a panel endpoint, just go on - */ - if (PTR_ERR(panel) != -ENODEV) - return -EPROBE_DEFER; + + /* + * If we don't have a panel endpoint, just go on + */ + if (PTR_ERR(panel) == -EPROBE_DEFER) { + DRM_DEBUG_DRIVER("Still waiting for our panel. 
Deferring...\n"); + return -EPROBE_DEFER; } return component_add(&pdev->dev, &sun4i_tcon_ops); diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c index 79027b1c64d3..107c8bd04f6d 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c @@ -697,7 +697,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc) spin_unlock_irqrestore(&tilcdc_crtc->irq_lock, flags); - drm_handle_vblank(dev, 0); + drm_crtc_handle_vblank(crtc); if (!skip_event) { struct drm_pending_vblank_event *event; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 39386f50af87..e340d0d66429 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -147,9 +147,9 @@ static void ttm_bo_release_list(struct kref *list_kref) BUG_ON(!list_empty(&bo->lru)); BUG_ON(!list_empty(&bo->ddestroy)); - if (bo->ttm) - ttm_tt_destroy(bo->ttm); + ttm_tt_destroy(bo->ttm); atomic_dec(&bo->glob->bo_count); + fence_put(bo->moving); if (bo->resv == &bo->ttm_resv) reservation_object_fini(&bo->ttm_resv); mutex_destroy(&bo->wu_mutex); @@ -360,7 +360,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, ret = bdev->driver->move(bo, evict, interruptible, no_wait_gpu, mem); else - ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, mem); + ret = ttm_bo_move_memcpy(bo, evict, interruptible, + no_wait_gpu, mem); if (ret) { if (bdev->driver->move_notify) { @@ -396,8 +397,7 @@ moved: out_err: new_man = &bdev->man[bo->mem.mem_type]; - if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) { - ttm_tt_unbind(bo->ttm); + if (new_man->flags & TTM_MEMTYPE_FLAG_FIXED) { ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } @@ -418,11 +418,8 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) if (bo->bdev->driver->move_notify) bo->bdev->driver->move_notify(bo, NULL); - if (bo->ttm) { - ttm_tt_unbind(bo->ttm); - ttm_tt_destroy(bo->ttm); - bo->ttm = NULL; - } + ttm_tt_destroy(bo->ttm); + bo->ttm = NULL; ttm_bo_mem_put(bo, &bo->mem); ww_mutex_unlock (&bo->resv->lock); @@ -688,15 +685,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, struct ttm_placement placement; int ret = 0; - ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); - - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) { - pr_err("Failed to expire sync object before buffer eviction\n"); - } - goto out; - } - lockdep_assert_held(&bo->resv->lock.base); evict_mem = bo->mem; @@ -720,7 +708,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible, ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible, no_wait_gpu); - if (ret) { + if (unlikely(ret)) { if (ret != -ERESTARTSYS) pr_err("Buffer eviction failed\n"); ttm_bo_mem_put(bo, &evict_mem); @@ -800,6 +788,34 @@ void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem) EXPORT_SYMBOL(ttm_bo_mem_put); /** + * Add the last move fence to the BO and reserve a new shared slot. 
+ */ +static int ttm_bo_add_move_fence(struct ttm_buffer_object *bo, + struct ttm_mem_type_manager *man, + struct ttm_mem_reg *mem) +{ + struct fence *fence; + int ret; + + spin_lock(&man->move_lock); + fence = fence_get(man->move); + spin_unlock(&man->move_lock); + + if (fence) { + reservation_object_add_shared_fence(bo->resv, fence); + + ret = reservation_object_reserve_shared(bo->resv); + if (unlikely(ret)) + return ret; + + fence_put(bo->moving); + bo->moving = fence; + } + + return 0; +} + +/** * Repeatedly evict memory from the LRU for @mem_type until we create enough * space, or we've evicted everything and there isn't enough space. */ @@ -825,10 +841,8 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, if (unlikely(ret != 0)) return ret; } while (1); - if (mem->mm_node == NULL) - return -ENOMEM; mem->mem_type = mem_type; - return 0; + return ttm_bo_add_move_fence(bo, man, mem); } static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man, @@ -898,6 +912,10 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, bool has_erestartsys = false; int i, ret; + ret = reservation_object_reserve_shared(bo->resv); + if (unlikely(ret)) + return ret; + mem->mm_node = NULL; for (i = 0; i < placement->num_placement; ++i) { const struct ttm_place *place = &placement->placement[i]; @@ -931,9 +949,15 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo, ret = (*man->func->get_node)(man, bo, place, mem); if (unlikely(ret)) return ret; - - if (mem->mm_node) + + if (mem->mm_node) { + ret = ttm_bo_add_move_fence(bo, man, mem); + if (unlikely(ret)) { + (*man->func->put_node)(man, mem); + return ret; + } break; + } } if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) { @@ -1000,20 +1024,6 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo, lockdep_assert_held(&bo->resv->lock.base); - /* - * Don't wait for the BO on initial allocation. This is important when - * the BO has an imported reservation object. - */ - if (bo->mem.mem_type != TTM_PL_SYSTEM || bo->ttm != NULL) { - /* - * FIXME: It's possible to pipeline buffer moves. - * Have the driver move function wait for idle when necessary, - * instead of doing it here. 
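
Aside: with the wait below removed, a move only has to be synchronized by whoever next touches the buffer contents, through the new bo->moving fence. A hedged sketch of that consumer side, assuming the caller holds the BO's reservation (wait_for_bo_move is a made-up helper; the fault path later in this patch does essentially this):

#include <linux/fence.h>
#include <drm/ttm/ttm_bo_api.h>

static int wait_for_bo_move(struct ttm_buffer_object *bo, bool interruptible)
{
	int ret;

	if (!bo->moving)
		return 0;		/* never moved, nothing to sync on */

	if (fence_is_signaled(bo->moving))
		ret = 0;		/* move already retired */
	else
		ret = fence_wait(bo->moving, interruptible);

	if (ret == 0) {
		/* Drop the reference so the next check is the fast path. */
		fence_put(bo->moving);
		bo->moving = NULL;
	}
	return ret;
}
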
- */ - ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); - if (ret) - return ret; - } mem.num_pages = bo->num_pages; mem.size = mem.num_pages << PAGE_SHIFT; mem.page_alignment = bo->mem.page_alignment; @@ -1165,7 +1175,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev, bo->mem.page_alignment = page_alignment; bo->mem.bus.io_reserved_vm = false; bo->mem.bus.io_reserved_count = 0; - bo->priv_flags = 0; + bo->moving = NULL; bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED); bo->persistent_swap_storage = persistent_swap_storage; bo->acc_size = acc_size; @@ -1277,6 +1287,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, { struct ttm_mem_type_manager *man = &bdev->man[mem_type]; struct ttm_bo_global *glob = bdev->glob; + struct fence *fence; int ret; /* @@ -1297,6 +1308,23 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev, spin_lock(&glob->lru_lock); } spin_unlock(&glob->lru_lock); + + spin_lock(&man->move_lock); + fence = fence_get(man->move); + spin_unlock(&man->move_lock); + + if (fence) { + ret = fence_wait(fence, false); + fence_put(fence); + if (ret) { + if (allow_errors) { + return ret; + } else { + pr_err("Cleanup eviction failed\n"); + } + } + } + return 0; } @@ -1316,6 +1344,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type) mem_type); return ret; } + fence_put(man->move); man->use_type = false; man->has_type = false; @@ -1361,6 +1390,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, man->io_reserve_fastpath = true; man->use_io_reserve_lru = false; mutex_init(&man->io_reserve_mutex); + spin_lock_init(&man->move_lock); INIT_LIST_HEAD(&man->io_reserve_lru); ret = bdev->driver->init_mem_type(bdev, type, man); @@ -1379,6 +1409,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type, man->size = p_size; INIT_LIST_HEAD(&man->lru); + man->move = NULL; return 0; } @@ -1572,47 +1603,17 @@ EXPORT_SYMBOL(ttm_bo_unmap_virtual); int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait) { - struct reservation_object_list *fobj; - struct reservation_object *resv; - struct fence *excl; - long timeout = 15 * HZ; - int i; - - resv = bo->resv; - fobj = reservation_object_get_list(resv); - excl = reservation_object_get_excl(resv); - if (excl) { - if (!fence_is_signaled(excl)) { - if (no_wait) - return -EBUSY; - - timeout = fence_wait_timeout(excl, - interruptible, timeout); - } - } - - for (i = 0; fobj && timeout > 0 && i < fobj->shared_count; ++i) { - struct fence *fence; - fence = rcu_dereference_protected(fobj->shared[i], - reservation_object_held(resv)); - - if (!fence_is_signaled(fence)) { - if (no_wait) - return -EBUSY; - - timeout = fence_wait_timeout(fence, - interruptible, timeout); - } - } + long timeout = no_wait ? 0 : 15 * HZ; + timeout = reservation_object_wait_timeout_rcu(bo->resv, true, + interruptible, timeout); if (timeout < 0) return timeout; if (timeout == 0) return -EBUSY; - reservation_object_add_excl_fence(resv, NULL); - clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); + reservation_object_add_excl_fence(bo->resv, NULL); return 0; } EXPORT_SYMBOL(ttm_bo_wait); @@ -1682,14 +1683,9 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) ttm_bo_list_ref_sub(bo, put_count, true); /** - * Wait for GPU, then move to system cached. 
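
Aside: the rewritten ttm_bo_wait() above leans on reservation_object_wait_timeout_rcu(), which folds the exclusive fence and all shared fences into a single wait. Its long result encodes three cases; a sketch of the mapping the hunk uses (resv_wait_idle is a placeholder name):

#include <linux/jiffies.h>
#include <linux/reservation.h>

static int resv_wait_idle(struct reservation_object *resv,
			  bool interruptible, bool no_wait)
{
	/* A zero timeout turns the call into a non-blocking poll. */
	long timeout = no_wait ? 0 : 15 * HZ;

	/* wait_all=true: shared (reader) fences count as busy too. */
	timeout = reservation_object_wait_timeout_rcu(resv, true,
						      interruptible, timeout);
	if (timeout < 0)
		return timeout;		/* interrupted, -ERESTARTSYS etc. */
	if (timeout == 0)
		return -EBUSY;		/* fences still pending */
	return 0;			/* idle, with time to spare */
}
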
+ * Move to system cached */ - ret = ttm_bo_wait(bo, false, false); - - if (unlikely(ret != 0)) - goto out; - if ((bo->mem.placement & swap_placement) != swap_placement) { struct ttm_mem_reg evict_mem; @@ -1704,6 +1700,14 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink) goto out; } + /** + * Make sure BO is idle. + */ + + ret = ttm_bo_wait(bo, false, false); + if (unlikely(ret != 0)) + goto out; + ttm_bo_unmap_virtual(bo); /** diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index d9831559706e..4da0e784f9e7 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -53,7 +53,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, int ret; if (old_mem->mem_type != TTM_PL_SYSTEM) { - ttm_tt_unbind(ttm); ttm_bo_free_old_node(bo); ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM, TTM_PL_MASK_MEM); @@ -321,7 +320,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst, } int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, - bool evict, bool no_wait_gpu, + bool evict, bool interruptible, + bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; @@ -337,6 +337,10 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, unsigned long add = 0; int dir; + ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); + if (ret) + return ret; + ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap); if (ret) return ret; @@ -401,8 +405,7 @@ out2: *old_mem = *new_mem; new_mem->mm_node = NULL; - if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) { - ttm_tt_unbind(ttm); + if (man->flags & TTM_MEMTYPE_FLAG_FIXED) { ttm_tt_destroy(ttm); bo->ttm = NULL; } @@ -462,6 +465,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, INIT_LIST_HEAD(&fbo->lru); INIT_LIST_HEAD(&fbo->swap); INIT_LIST_HEAD(&fbo->io_reserve_lru); + fbo->moving = NULL; drm_vma_node_reset(&fbo->vma_node); atomic_set(&fbo->cpu_writers, 0); @@ -634,7 +638,6 @@ EXPORT_SYMBOL(ttm_bo_kunmap); int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, struct fence *fence, bool evict, - bool no_wait_gpu, struct ttm_mem_reg *new_mem) { struct ttm_bo_device *bdev = bo->bdev; @@ -649,9 +652,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, if (ret) return ret; - if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && - (bo->ttm != NULL)) { - ttm_tt_unbind(bo->ttm); + if (man->flags & TTM_MEMTYPE_FLAG_FIXED) { ttm_tt_destroy(bo->ttm); bo->ttm = NULL; } @@ -665,7 +666,8 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, * operation has completed. */ - set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags); + fence_put(bo->moving); + bo->moving = fence_get(fence); ret = ttm_buffer_object_transfer(bo, &ghost_obj); if (ret) @@ -694,3 +696,95 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo, return 0; } EXPORT_SYMBOL(ttm_bo_move_accel_cleanup); + +int ttm_bo_pipeline_move(struct ttm_buffer_object *bo, + struct fence *fence, bool evict, + struct ttm_mem_reg *new_mem) +{ + struct ttm_bo_device *bdev = bo->bdev; + struct ttm_mem_reg *old_mem = &bo->mem; + + struct ttm_mem_type_manager *from = &bdev->man[old_mem->mem_type]; + struct ttm_mem_type_manager *to = &bdev->man[new_mem->mem_type]; + + int ret; + + reservation_object_add_excl_fence(bo->resv, fence); + + if (!evict) { + struct ttm_buffer_object *ghost_obj; + + /** + * This should help pipeline ordinary buffer moves. + * + * Hang old buffer memory on a new buffer object, + * and leave it to be released when the GPU + * operation has completed. 
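
Aside: besides the ghost-object trick described in this comment, ttm_bo_pipeline_move() also teaches each memory manager to remember the newest eviction from its space in man->move. A sketch of that bookkeeping step, which the FIXED-memory branch below performs inline (manager_note_move is a hypothetical name):

#include <linux/fence.h>
#include <drm/ttm/ttm_bo_driver.h>

static void manager_note_move(struct ttm_mem_type_manager *man,
			      struct fence *fence)
{
	spin_lock(&man->move_lock);
	/* Only ever keep the later fence; future allocations from this
	 * manager wait on it before reusing the freed space. */
	if (!man->move || fence_is_later(fence, man->move)) {
		fence_put(man->move);
		man->move = fence_get(fence);
	}
	spin_unlock(&man->move_lock);
}
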
+ */ + + fence_put(bo->moving); + bo->moving = fence_get(fence); + + ret = ttm_buffer_object_transfer(bo, &ghost_obj); + if (ret) + return ret; + + reservation_object_add_excl_fence(ghost_obj->resv, fence); + + /** + * If we're not moving to fixed memory, the TTM object + * needs to stay alive. Otherwise hang it on the ghost + * bo to be unbound and destroyed. + */ + + if (!(to->flags & TTM_MEMTYPE_FLAG_FIXED)) + ghost_obj->ttm = NULL; + else + bo->ttm = NULL; + + ttm_bo_unreserve(ghost_obj); + ttm_bo_unref(&ghost_obj); + + } else if (from->flags & TTM_MEMTYPE_FLAG_FIXED) { + + /** + * BO doesn't have a TTM we need to bind/unbind. Just remember + * this eviction and free up the allocation + */ + + spin_lock(&from->move_lock); + if (!from->move || fence_is_later(fence, from->move)) { + fence_put(from->move); + from->move = fence_get(fence); + } + spin_unlock(&from->move_lock); + + ttm_bo_free_old_node(bo); + + fence_put(bo->moving); + bo->moving = fence_get(fence); + + } else { + /** + * Last resort, wait for the move to be completed. + * + * Should never happen in practice. + */ + + ret = ttm_bo_wait(bo, false, false); + if (ret) + return ret; + + if (to->flags & TTM_MEMTYPE_FLAG_FIXED) { + ttm_tt_destroy(bo->ttm); + bo->ttm = NULL; + } + ttm_bo_free_old_node(bo); + } + + *old_mem = *new_mem; + new_mem->mm_node = NULL; + + return 0; +} +EXPORT_SYMBOL(ttm_bo_pipeline_move); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 3216878bced3..a6ed9d5e5167 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -48,15 +48,14 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, { int ret = 0; - if (likely(!test_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags))) + if (likely(!bo->moving)) goto out_unlock; /* * Quick non-stalling check for idle. */ - ret = ttm_bo_wait(bo, false, true); - if (likely(ret == 0)) - goto out_unlock; + if (fence_is_signaled(bo->moving)) + goto out_clear; /* * If possible, avoid waiting for GPU with mmap_sem @@ -68,17 +67,23 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, goto out_unlock; up_read(&vma->vm_mm->mmap_sem); - (void) ttm_bo_wait(bo, true, false); + (void) fence_wait(bo->moving, true); goto out_unlock; } /* * Ordinary wait. */ - ret = ttm_bo_wait(bo, true, false); - if (unlikely(ret != 0)) + ret = fence_wait(bo->moving, true); + if (unlikely(ret != 0)) { + ret = (ret != -ERESTARTSYS) ? 
VM_FAULT_SIGBUS : VM_FAULT_NOPAGE; + goto out_unlock; + } + +out_clear: + fence_put(bo->moving); + bo->moving = NULL; out_unlock: return ret; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 077ae9b2865d..d28d4333dcce 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -166,11 +166,15 @@ EXPORT_SYMBOL(ttm_tt_set_placement_caching); void ttm_tt_destroy(struct ttm_tt *ttm) { - if (unlikely(ttm == NULL)) + int ret; + + if (ttm == NULL) return; if (ttm->state == tt_bound) { - ttm_tt_unbind(ttm); + ret = ttm->func->unbind(ttm); + BUG_ON(ret); + ttm->state = tt_unbound; } if (ttm->state == tt_unbound) @@ -251,17 +255,6 @@ void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma) } EXPORT_SYMBOL(ttm_dma_tt_fini); -void ttm_tt_unbind(struct ttm_tt *ttm) -{ - int ret; - - if (ttm->state == tt_bound) { - ret = ttm->func->unbind(ttm); - BUG_ON(ret); - ttm->state = tt_unbound; - } -} - int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { int ret = 0; diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c index c20408940cd0..17d34e0edbdd 100644 --- a/drivers/gpu/drm/udl/udl_drv.c +++ b/drivers/gpu/drm/udl/udl_drv.c @@ -94,7 +94,6 @@ static void udl_usb_disconnect(struct usb_interface *interface) struct drm_device *dev = usb_get_intfdata(interface); drm_kms_helper_poll_disable(dev); - drm_connector_unregister_all(dev); udl_fbdev_unplug(dev); udl_drop_usb(dev); drm_unplug_dev(dev); diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 4c0f26a644a3..c82d468d178b 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -449,14 +449,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), - vc4_state->mm.start); - - if (debug_dump_regs) { - DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); - vc4_hvs_dump_state(dev); - } - if (crtc->state->event) { unsigned long flags; @@ -466,8 +458,20 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, spin_lock_irqsave(&dev->event_lock, flags); vc4_crtc->event = crtc->state->event; - spin_unlock_irqrestore(&dev->event_lock, flags); crtc->state->event = NULL; + + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + vc4_state->mm.start); + + spin_unlock_irqrestore(&dev->event_lock, flags); + } else { + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + vc4_state->mm.start); + } + + if (debug_dump_regs) { + DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); + vc4_hvs_dump_state(dev); } } @@ -493,12 +497,17 @@ static void vc4_crtc_handle_page_flip(struct vc4_crtc *vc4_crtc) { struct drm_crtc *crtc = &vc4_crtc->base; struct drm_device *dev = crtc->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); + u32 chan = vc4_crtc->channel; unsigned long flags; spin_lock_irqsave(&dev->event_lock, flags); - if (vc4_crtc->event) { + if (vc4_crtc->event && + (vc4_state->mm.start == HVS_READ(SCALER_DISPLACTX(chan)))) { drm_crtc_send_vblank_event(crtc, vc4_crtc->event); vc4_crtc->event = NULL; + drm_crtc_vblank_put(crtc); } spin_unlock_irqrestore(&dev->event_lock, flags); } @@ -549,6 +558,7 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) spin_unlock_irqrestore(&dev->event_lock, flags); } + drm_crtc_vblank_put(crtc); drm_framebuffer_unreference(flip_state->fb); kfree(flip_state); @@ -591,6 +601,8 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, return 
ret; } + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + /* Immediately update the plane's legacy fb pointer, so that later * modeset prep sees the state that will be present when the semaphore * is released. diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 9e88231b8906..54d0471243dd 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -66,12 +66,12 @@ static const struct file_operations vc4_drm_fops = { }; static const struct drm_ioctl_desc vc4_drm_ioctls[] = { - DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, 0), - DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, 0), + DRM_IOCTL_DEF_DRV(VC4_SUBMIT_CL, vc4_submit_cl_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_WAIT_SEQNO, vc4_wait_seqno_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_WAIT_BO, vc4_wait_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_CREATE_BO, vc4_create_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_MMAP_BO, vc4_mmap_bo_ioctl, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VC4_CREATE_SHADER_BO, vc4_create_shader_bo_ioctl, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(VC4_GET_HANG_STATE, vc4_get_hang_state_ioctl, DRM_ROOT_ONLY), }; @@ -91,7 +91,7 @@ static struct drm_driver vc4_drm_driver = { .enable_vblank = vc4_enable_vblank, .disable_vblank = vc4_disable_vblank, - .get_vblank_counter = drm_vblank_count, + .get_vblank_counter = drm_vblank_no_hw_counter, #if defined(CONFIG_DEBUG_FS) .debugfs_init = vc4_debugfs_init, diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 8f4d5ffc32be..9a217fd025f3 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -119,10 +119,18 @@ static int vc4_atomic_commit(struct drm_device *dev, return -ENOMEM; /* Make sure that any outstanding modesets have finished. 
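
Aside: the hunk just below distinguishes blocking from nonblocking commits when taking the modeset semaphore, since a nonblocking commit must not sleep. A sketch of that pattern (grab_modeset_sem is a placeholder name):

#include <linux/semaphore.h>

static int grab_modeset_sem(struct semaphore *sem, bool nonblock)
{
	if (nonblock)
		/* down_trylock() returns nonzero when the semaphore is
		 * contended; report busy instead of sleeping. */
		return down_trylock(sem) ? -EBUSY : 0;

	/* Blocking path: sleep, but let signals abort with -EINTR. */
	return down_interruptible(sem);
}
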
*/ - ret = down_interruptible(&vc4->async_modeset); - if (ret) { - kfree(c); - return ret; + if (nonblock) { + ret = down_trylock(&vc4->async_modeset); + if (ret) { + kfree(c); + return -EBUSY; + } + } else { + ret = down_interruptible(&vc4->async_modeset); + if (ret) { + kfree(c); + return ret; + } } ret = drm_atomic_helper_prepare_planes(dev, state); diff --git a/drivers/gpu/drm/vc4/vc4_regs.h b/drivers/gpu/drm/vc4/vc4_regs.h index 6163b95c5411..f99eece4cc97 100644 --- a/drivers/gpu/drm/vc4/vc4_regs.h +++ b/drivers/gpu/drm/vc4/vc4_regs.h @@ -341,6 +341,10 @@ #define SCALER_DISPLACT0 0x00000030 #define SCALER_DISPLACT1 0x00000034 #define SCALER_DISPLACT2 0x00000038 +#define SCALER_DISPLACTX(x) (SCALER_DISPLACT0 + \ + (x) * (SCALER_DISPLACT1 - \ + SCALER_DISPLACT0)) + #define SCALER_DISPCTRL0 0x00000040 # define SCALER_DISPCTRLX_ENABLE BIT(31) # define SCALER_DISPCTRLX_RESET BIT(30) diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c index 35ea5d02a827..29c2aab3c1a7 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.c +++ b/drivers/gpu/drm/vgem/vgem_drv.c @@ -42,81 +42,38 @@ #define DRIVER_MAJOR 1 #define DRIVER_MINOR 0 -void vgem_gem_put_pages(struct drm_vgem_gem_object *obj) -{ - drm_gem_put_pages(&obj->base, obj->pages, false, false); - obj->pages = NULL; -} - static void vgem_gem_free_object(struct drm_gem_object *obj) { struct drm_vgem_gem_object *vgem_obj = to_vgem_bo(obj); - drm_gem_free_mmap_offset(obj); - - if (vgem_obj->use_dma_buf && obj->dma_buf) { - dma_buf_put(obj->dma_buf); - obj->dma_buf = NULL; - } - drm_gem_object_release(obj); - - if (vgem_obj->pages) - vgem_gem_put_pages(vgem_obj); - - vgem_obj->pages = NULL; - kfree(vgem_obj); } -int vgem_gem_get_pages(struct drm_vgem_gem_object *obj) -{ - struct page **pages; - - if (obj->pages || obj->use_dma_buf) - return 0; - - pages = drm_gem_get_pages(&obj->base); - if (IS_ERR(pages)) { - return PTR_ERR(pages); - } - - obj->pages = pages; - - return 0; -} - static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct drm_vgem_gem_object *obj = vma->vm_private_data; - loff_t num_pages; - pgoff_t page_offset; - int ret; - /* We don't use vmf->pgoff since that has the fake offset */ - page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> - PAGE_SHIFT; - - num_pages = DIV_ROUND_UP(obj->base.size, PAGE_SIZE); - - if (page_offset > num_pages) - return VM_FAULT_SIGBUS; - - ret = vm_insert_page(vma, (unsigned long)vmf->virtual_address, - obj->pages[page_offset]); - switch (ret) { - case 0: - return VM_FAULT_NOPAGE; - case -ENOMEM: - return VM_FAULT_OOM; - case -EBUSY: - return VM_FAULT_RETRY; - case -EFAULT: - case -EINVAL: - return VM_FAULT_SIGBUS; - default: - WARN_ON(1); - return VM_FAULT_SIGBUS; + unsigned long vaddr = (unsigned long)vmf->virtual_address; + struct page *page; + + page = shmem_read_mapping_page(file_inode(obj->base.filp)->i_mapping, + (vaddr - vma->vm_start) >> PAGE_SHIFT); + if (!IS_ERR(page)) { + vmf->page = page; + return 0; + } else switch (PTR_ERR(page)) { + case -ENOSPC: + case -ENOMEM: + return VM_FAULT_OOM; + case -EBUSY: + return VM_FAULT_RETRY; + case -EFAULT: + case -EINVAL: + return VM_FAULT_SIGBUS; + default: + WARN_ON_ONCE(PTR_ERR(page)); + return VM_FAULT_SIGBUS; } } @@ -134,57 +91,43 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev, unsigned long size) { struct drm_vgem_gem_object *obj; - struct drm_gem_object *gem_object; - int err; - - size = roundup(size, PAGE_SIZE); + int ret; obj = kzalloc(sizeof(*obj), GFP_KERNEL); 
if (!obj) return ERR_PTR(-ENOMEM); - gem_object = &obj->base; - - err = drm_gem_object_init(dev, gem_object, size); - if (err) - goto out; - - err = vgem_gem_get_pages(obj); - if (err) - goto out; - - err = drm_gem_handle_create(file, gem_object, handle); - if (err) - goto handle_out; + ret = drm_gem_object_init(dev, &obj->base, roundup(size, PAGE_SIZE)); + if (ret) + goto err_free; - drm_gem_object_unreference_unlocked(gem_object); + ret = drm_gem_handle_create(file, &obj->base, handle); + drm_gem_object_unreference_unlocked(&obj->base); + if (ret) + goto err; - return gem_object; + return &obj->base; -handle_out: - drm_gem_object_release(gem_object); -out: +err_free: kfree(obj); - return ERR_PTR(err); +err: + return ERR_PTR(ret); } static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, struct drm_mode_create_dumb *args) { struct drm_gem_object *gem_object; - uint64_t size; - uint64_t pitch = args->width * DIV_ROUND_UP(args->bpp, 8); + u64 pitch, size; + pitch = args->width * DIV_ROUND_UP(args->bpp, 8); size = args->height * pitch; if (size == 0) return -EINVAL; gem_object = vgem_gem_create(dev, file, &args->handle, size); - - if (IS_ERR(gem_object)) { - DRM_DEBUG_DRIVER("object creation failed\n"); + if (IS_ERR(gem_object)) return PTR_ERR(gem_object); - } args->size = gem_object->size; args->pitch = pitch; @@ -194,26 +137,26 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev, return 0; } -int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, - uint32_t handle, uint64_t *offset) +static int vgem_gem_dumb_map(struct drm_file *file, struct drm_device *dev, + uint32_t handle, uint64_t *offset) { - int ret = 0; struct drm_gem_object *obj; + int ret; obj = drm_gem_object_lookup(file, handle); if (!obj) return -ENOENT; + if (!obj->filp) { + ret = -EINVAL; + goto unref; + } + ret = drm_gem_create_mmap_offset(obj); if (ret) goto unref; - BUG_ON(!obj->filp); - - obj->filp->private_data = obj; - *offset = drm_vma_node_offset_addr(&obj->vma_node); - unref: drm_gem_object_unreference_unlocked(obj); @@ -223,24 +166,127 @@ unref: static struct drm_ioctl_desc vgem_ioctls[] = { }; +static int vgem_mmap(struct file *filp, struct vm_area_struct *vma) +{ + unsigned long flags = vma->vm_flags; + int ret; + + ret = drm_gem_mmap(filp, vma); + if (ret) + return ret; + + /* Keep the WC mmapping set by drm_gem_mmap() but our pages + * are ordinary and not special. + */ + vma->vm_flags = flags | VM_DONTEXPAND | VM_DONTDUMP; + return 0; +} + static const struct file_operations vgem_driver_fops = { .owner = THIS_MODULE, .open = drm_open, - .mmap = drm_gem_mmap, + .mmap = vgem_mmap, .poll = drm_poll, .read = drm_read, .unlocked_ioctl = drm_ioctl, .release = drm_release, }; +static int vgem_prime_pin(struct drm_gem_object *obj) +{ + long n_pages = obj->size >> PAGE_SHIFT; + struct page **pages; + + /* Flush the object from the CPU cache so that importers can rely + * on coherent indirect access via the exported dma-address. 
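
Aside: vgem_prime_pin() here and the prime helpers below all share one idiom: borrow the shmem-backed pages, operate on them, hand them back. A hedged sketch of that idiom (with_object_pages is a made-up wrapper, not part of the patch):

#include <drm/drmP.h>
#include <drm/drm_gem.h>

static int with_object_pages(struct drm_gem_object *obj,
			     void (*fn)(struct page **pages, long n_pages))
{
	long n_pages = obj->size >> PAGE_SHIFT;
	struct page **pages;

	pages = drm_gem_get_pages(obj);		/* pins the shmem pages */
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	fn(pages, n_pages);

	/* dirty=false, accessed=false here; callers that touch the page
	 * contents, like the cache flush above, pass dirty=true instead. */
	drm_gem_put_pages(obj, pages, false, false);
	return 0;
}
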
+ */ + pages = drm_gem_get_pages(obj); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + drm_clflush_pages(pages, n_pages); + drm_gem_put_pages(obj, pages, true, false); + + return 0; +} + +static struct sg_table *vgem_prime_get_sg_table(struct drm_gem_object *obj) +{ + struct sg_table *st; + struct page **pages; + + pages = drm_gem_get_pages(obj); + if (IS_ERR(pages)) + return ERR_CAST(pages); + + st = drm_prime_pages_to_sg(pages, obj->size >> PAGE_SHIFT); + drm_gem_put_pages(obj, pages, false, false); + + return st; +} + +static void *vgem_prime_vmap(struct drm_gem_object *obj) +{ + long n_pages = obj->size >> PAGE_SHIFT; + struct page **pages; + void *addr; + + pages = drm_gem_get_pages(obj); + if (IS_ERR(pages)) + return NULL; + + addr = vmap(pages, n_pages, 0, pgprot_writecombine(PAGE_KERNEL)); + drm_gem_put_pages(obj, pages, false, false); + + return addr; +} + +static void vgem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + vunmap(vaddr); +} + +static int vgem_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ + int ret; + + if (obj->size < vma->vm_end - vma->vm_start) + return -EINVAL; + + if (!obj->filp) + return -ENODEV; + + ret = obj->filp->f_op->mmap(obj->filp, vma); + if (ret) + return ret; + + fput(vma->vm_file); + vma->vm_file = get_file(obj->filp); + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; + vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags)); + + return 0; +} + static struct drm_driver vgem_driver = { - .driver_features = DRIVER_GEM, + .driver_features = DRIVER_GEM | DRIVER_PRIME, .gem_free_object_unlocked = vgem_gem_free_object, .gem_vm_ops = &vgem_gem_vm_ops, .ioctls = vgem_ioctls, .fops = &vgem_driver_fops, + .dumb_create = vgem_gem_dumb_create, .dumb_map_offset = vgem_gem_dumb_map, + + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .gem_prime_pin = vgem_prime_pin, + .gem_prime_export = drm_gem_prime_export, + .gem_prime_get_sg_table = vgem_prime_get_sg_table, + .gem_prime_vmap = vgem_prime_vmap, + .gem_prime_vunmap = vgem_prime_vunmap, + .gem_prime_mmap = vgem_prime_mmap, + .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -248,7 +294,7 @@ static struct drm_driver vgem_driver = { .minor = DRIVER_MINOR, }; -struct drm_device *vgem_device; +static struct drm_device *vgem_device; static int __init vgem_init(void) { @@ -261,7 +307,6 @@ static int __init vgem_init(void) } ret = drm_dev_register(vgem_device, 0); - if (ret) goto out_unref; diff --git a/drivers/gpu/drm/vgem/vgem_drv.h b/drivers/gpu/drm/vgem/vgem_drv.h index e9f92f7ee275..988cbaae7588 100644 --- a/drivers/gpu/drm/vgem/vgem_drv.h +++ b/drivers/gpu/drm/vgem/vgem_drv.h @@ -35,12 +35,6 @@ #define to_vgem_bo(x) container_of(x, struct drm_vgem_gem_object, base) struct drm_vgem_gem_object { struct drm_gem_object base; - struct page **pages; - bool use_dma_buf; }; -/* vgem_drv.c */ -extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj); -extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj); - #endif diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c index a0580815629f..80482ac5f95d 100644 --- a/drivers/gpu/drm/virtio/virtgpu_ttm.c +++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c @@ -375,6 +375,12 @@ static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool no_wait_gpu, struct ttm_mem_reg *new_mem) { + int ret; + + ret = ttm_bo_wait(bo, interruptible, no_wait_gpu); + if (ret) + return ret; + virtio_gpu_move_null(bo, new_mem); return 0; } diff --git 
a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 60646644bef3..5d5c9515618d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -1041,8 +1041,7 @@ static struct vmw_master *vmw_master_check(struct drm_device *dev, struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv); struct vmw_master *vmaster; - if (file_priv->minor->type != DRM_MINOR_LEGACY || - !(flags & DRM_AUTH)) + if (!drm_is_primary_client(file_priv) || !(flags & DRM_AUTH)) return NULL; ret = mutex_lock_interruptible(&dev->master_mutex); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c index 6de283c8fa3e..f0374f9b56ca 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c @@ -28,6 +28,7 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> +#include <linux/frame.h> #include <asm/hypervisor.h> #include "drmP.h" #include "vmwgfx_msg.h" @@ -194,7 +195,7 @@ static int vmw_send_msg(struct rpc_channel *channel, const char *msg) return -EINVAL; } - +STACK_FRAME_NON_STANDARD(vmw_send_msg); /** @@ -304,6 +305,7 @@ static int vmw_recv_msg(struct rpc_channel *channel, void **msg, return 0; } +STACK_FRAME_NON_STANDARD(vmw_recv_msg); /** diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 2df216b39cc5..5f962bfcb43c 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -52,9 +52,9 @@ * * * muxed: Dual GPUs with a multiplexer chip to switch outputs between GPUs. * * muxless: Dual GPUs but only one of them is connected to outputs. - * The other one is merely used to offload rendering, its results - * are copied over PCIe into the framebuffer. On Linux this is - * supported with DRI PRIME. + * The other one is merely used to offload rendering, its results + * are copied over PCIe into the framebuffer. On Linux this is + * supported with DRI PRIME. * * Hybrid graphics started to appear in the late Naughties and were initially * all muxed. Newer laptops moved to a muxless architecture for cost reasons. @@ -560,21 +560,21 @@ EXPORT_SYMBOL(vga_switcheroo_unlock_ddc); * * OFF: Power off the device not in use. * * ON: Power on the device not in use. * * IGD: Switch to the integrated graphics device. - * Power on the integrated GPU if necessary, power off the discrete GPU. - * Prerequisite is that no user space processes (e.g. Xorg, alsactl) - * have opened device files of the GPUs or the audio client. If the - * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/ - * and /dev/snd/controlC1 to identify processes blocking the switch. + * Power on the integrated GPU if necessary, power off the discrete GPU. + * Prerequisite is that no user space processes (e.g. Xorg, alsactl) + * have opened device files of the GPUs or the audio client. If the + * switch fails, the user may invoke lsof(8) or fuser(1) on /dev/dri/ + * and /dev/snd/controlC1 to identify processes blocking the switch. * * DIS: Switch to the discrete graphics device. * * DIGD: Delayed switch to the integrated graphics device. - * This will perform the switch once the last user space process has - * closed the device files of the GPUs and the audio client. + * This will perform the switch once the last user space process has + * closed the device files of the GPUs and the audio client. * * DDIS: Delayed switch to the discrete graphics device. * * MIGD: Mux-only switch to the integrated graphics device. 
- * Does not remap console or change the power state of either gpu. - * If the integrated GPU is currently off, the screen will turn black. - * If it is on, the screen will show whatever happens to be in VRAM. - * Either way, the user has to blindly enter the command to switch back. + * Does not remap console or change the power state of either gpu. + * If the integrated GPU is currently off, the screen will turn black. + * If it is on, the screen will show whatever happens to be in VRAM. + * Either way, the user has to blindly enter the command to switch back. * * MDIS: Mux-only switch to the discrete graphics device. * * For GPUs whose power state is controlled by the driver's runtime pm,