Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu.h	858
1 file changed, 615 insertions(+), 243 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c0316eaba547..9f9774f58ce1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -50,23 +50,20 @@ #include <linux/hashtable.h> #include <linux/dma-fence.h> #include <linux/pci.h> -#include <linux/aer.h> -#include <drm/ttm/ttm_bo_api.h> -#include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_bo.h> #include <drm/ttm/ttm_placement.h> -#include <drm/ttm/ttm_execbuf_util.h> #include <drm/amdgpu_drm.h> #include <drm/drm_gem.h> #include <drm/drm_ioctl.h> -#include <drm/gpu_scheduler.h> #include <kgd_kfd_interface.h> #include "dm_pp_interface.h" #include "kgd_pp_interface.h" #include "amd_shared.h" +#include "amdgpu_utils.h" #include "amdgpu_mode.h" #include "amdgpu_ih.h" #include "amdgpu_irq.h" @@ -83,15 +80,18 @@ #include "amdgpu_vce.h" #include "amdgpu_vcn.h" #include "amdgpu_jpeg.h" -#include "amdgpu_mn.h" +#include "amdgpu_vpe.h" +#include "amdgpu_umsch_mm.h" #include "amdgpu_gmc.h" #include "amdgpu_gfx.h" #include "amdgpu_sdma.h" +#include "amdgpu_lsdma.h" #include "amdgpu_nbio.h" #include "amdgpu_hdp.h" #include "amdgpu_dm.h" #include "amdgpu_virt.h" #include "amdgpu_csa.h" +#include "amdgpu_mes_ctx.h" #include "amdgpu_gart.h" #include "amdgpu_debugfs.h" #include "amdgpu_job.h" @@ -99,7 +99,6 @@ #include "amdgpu_gem.h" #include "amdgpu_doorbell.h" #include "amdgpu_amdkfd.h" -#include "amdgpu_smu.h" #include "amdgpu_discovery.h" #include "amdgpu_mes.h" #include "amdgpu_umc.h" @@ -108,26 +107,34 @@ #include "amdgpu_df.h" #include "amdgpu_smuio.h" #include "amdgpu_fdinfo.h" +#include "amdgpu_mca.h" +#include "amdgpu_aca.h" +#include "amdgpu_ras.h" +#include "amdgpu_cper.h" +#include "amdgpu_xcp.h" +#include "amdgpu_seq64.h" +#include "amdgpu_reg_state.h" +#include "amdgpu_userq.h" +#include "amdgpu_eviction_fence.h" +#if defined(CONFIG_DRM_AMD_ISP) +#include "amdgpu_isp.h" +#endif -#define MAX_GPU_INSTANCE 16 +#define MAX_GPU_INSTANCE 64 -struct amdgpu_gpu_instance -{ +#define GFX_SLICE_PERIOD_MS 250 + +struct amdgpu_gpu_instance { struct amdgpu_device *adev; int mgpu_fan_enabled; }; -struct amdgpu_mgpu_info -{ +struct amdgpu_mgpu_info { struct amdgpu_gpu_instance gpu_ins[MAX_GPU_INSTANCE]; struct mutex mutex; uint32_t num_gpu; uint32_t num_dgpu; uint32_t num_apu; - - /* delayed reset_func for XGMI configuration if necessary */ - struct delayed_work delayed_reset_work; - bool pending_reset; }; enum amdgpu_ss { @@ -137,8 +144,15 @@ enum amdgpu_ss { AMDGPU_SS_DRV_UNLOAD }; -struct amdgpu_watchdog_timer -{ +struct amdgpu_hwip_reg_entry { + u32 hwip; + u32 inst; + u32 seg; + u32 reg_offset; + const char *reg_name; +}; + +struct amdgpu_watchdog_timer { bool timeout_fatal_disable; uint32_t period; /* maxCycles = (1 << period), the number of cycles before a timeout */ }; @@ -149,13 +163,11 @@ struct amdgpu_watchdog_timer * Modules parameters. 
*/ extern int amdgpu_modeset; -extern int amdgpu_vram_limit; +extern unsigned int amdgpu_vram_limit; extern int amdgpu_vis_vram_limit; extern int amdgpu_gart_size; extern int amdgpu_gtt_size; extern int amdgpu_moverate; -extern int amdgpu_benchmarking; -extern int amdgpu_testing; extern int amdgpu_audio; extern int amdgpu_disp_priority; extern int amdgpu_hw_i2c; @@ -181,14 +193,13 @@ extern int amdgpu_sched_jobs; extern int amdgpu_sched_hw_submission; extern uint amdgpu_pcie_gen_cap; extern uint amdgpu_pcie_lane_cap; -extern uint amdgpu_cg_mask; +extern u64 amdgpu_cg_mask; extern uint amdgpu_pg_mask; extern uint amdgpu_sdma_phase_quantum; extern char *amdgpu_disable_cu; extern char *amdgpu_virtual_display; extern uint amdgpu_pp_feature_mask; extern uint amdgpu_force_long_training; -extern int amdgpu_job_hang_limit; extern int amdgpu_lbpw; extern int amdgpu_compute_multipipe; extern int amdgpu_gpu_recovery; @@ -198,28 +209,43 @@ extern int amdgpu_smu_pptable_id; extern uint amdgpu_dc_feature_mask; extern uint amdgpu_freesync_vid_mode; extern uint amdgpu_dc_debug_mask; -extern uint amdgpu_dm_abm_level; +extern uint amdgpu_dc_visual_confirm; +extern int amdgpu_dm_abm_level; extern int amdgpu_backlight; +extern int amdgpu_damage_clips; extern struct amdgpu_mgpu_info mgpu_info; extern int amdgpu_ras_enable; extern uint amdgpu_ras_mask; extern int amdgpu_bad_page_threshold; +extern bool amdgpu_ignore_bad_page_threshold; extern struct amdgpu_watchdog_timer amdgpu_watchdog_timer; extern int amdgpu_async_gfx_ring; extern int amdgpu_mcbp; extern int amdgpu_discovery; extern int amdgpu_mes; +extern int amdgpu_mes_log_enable; +extern int amdgpu_mes_kiq; +extern int amdgpu_uni_mes; extern int amdgpu_noretry; extern int amdgpu_force_asic_type; extern int amdgpu_smartshift_bias; +extern int amdgpu_use_xgmi_p2p; +extern int amdgpu_mtype_local; +extern int amdgpu_enforce_isolation; #ifdef CONFIG_HSA_AMD extern int sched_policy; extern bool debug_evictions; extern bool no_system_mem_limit; +extern int halt_if_hws_hang; +extern uint amdgpu_svm_default_granularity; #else static const int __maybe_unused sched_policy = KFD_SCHED_POLICY_HWS; static const bool __maybe_unused debug_evictions; /* = false */ static const bool __maybe_unused no_system_mem_limit; +static const int __maybe_unused halt_if_hws_hang; +#endif +#ifdef CONFIG_HSA_AMD_P2P +extern bool pcie_p2p; #endif extern int amdgpu_tmz; @@ -233,9 +259,23 @@ extern int amdgpu_cik_support; #endif extern int amdgpu_num_kcq; +#define AMDGPU_VCNFW_LOG_SIZE (32 * 1024) +#define AMDGPU_UMSCHFW_LOG_SIZE (32 * 1024) +extern int amdgpu_vcnfw_log; +extern int amdgpu_sg_display; +extern int amdgpu_umsch_mm; +extern int amdgpu_seamless; +extern int amdgpu_umsch_mm_fwlog; + +extern int amdgpu_user_partt_mode; +extern int amdgpu_agp; +extern int amdgpu_rebar; + +extern int amdgpu_wbrf; +extern int amdgpu_user_queue; + #define AMDGPU_VM_MAX_NUM_CTX 4096 #define AMDGPU_SG_THRESHOLD (256*1024*1024) -#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */ #define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000 #define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */ #define AMDGPU_FENCE_JIFFIES_TIMEOUT (HZ / 2) @@ -265,18 +305,25 @@ extern int amdgpu_num_kcq; #define AMDGPU_RESET_VCE (1 << 13) #define AMDGPU_RESET_VCE1 (1 << 14) +/* reset mask */ +#define AMDGPU_RESET_TYPE_FULL (1 << 0) /* full adapter reset, mode1/mode2/BACO/etc. 
*/ +#define AMDGPU_RESET_TYPE_SOFT_RESET (1 << 1) /* IP level soft reset */ +#define AMDGPU_RESET_TYPE_PER_QUEUE (1 << 2) /* per queue */ +#define AMDGPU_RESET_TYPE_PER_PIPE (1 << 3) /* per pipe */ + /* max cursor sizes (in pixels) */ #define CIK_CURSOR_WIDTH 128 #define CIK_CURSOR_HEIGHT 128 -/* smasrt shift bias level limits */ +/* smart shift bias level limits */ #define AMDGPU_SMARTSHIFT_MAX_BIAS (100) #define AMDGPU_SMARTSHIFT_MIN_BIAS (-100) +/* Extra time delay(in ms) to eliminate the influence of temperature momentary fluctuation */ +#define AMDGPU_SWCTF_EXTRA_DELAY 50 + +struct amdgpu_xcp_mgr; struct amdgpu_device; -struct amdgpu_ib; -struct amdgpu_cs_parser; -struct amdgpu_job; struct amdgpu_irq_src; struct amdgpu_fpriv; struct amdgpu_bo_va_mapping; @@ -311,7 +358,6 @@ enum amdgpu_kiq_irq { AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0, AMDGPU_CP_KIQ_IRQ_LAST }; - #define MAX_KIQ_REG_WAIT 5000 /* in usecs, 5ms */ #define MAX_KIQ_REG_BAILOUT_INTERVAL 5 /* in msecs, 5ms */ #define MAX_KIQ_REG_TRY 1000 @@ -323,13 +369,18 @@ int amdgpu_device_ip_set_powergating_state(void *dev, enum amd_ip_block_type block_type, enum amd_powergating_state state); void amdgpu_device_ip_get_clockgating_state(struct amdgpu_device *adev, - u32 *flags); + u64 *flags); int amdgpu_device_ip_wait_for_idle(struct amdgpu_device *adev, enum amd_ip_block_type block_type); -bool amdgpu_device_ip_is_idle(struct amdgpu_device *adev, +bool amdgpu_device_ip_is_hw(struct amdgpu_device *adev, + enum amd_ip_block_type block_type); +bool amdgpu_device_ip_is_valid(struct amdgpu_device *adev, enum amd_ip_block_type block_type); +int amdgpu_ip_block_suspend(struct amdgpu_ip_block *ip_block); -#define AMDGPU_MAX_IP_NUM 16 +int amdgpu_ip_block_resume(struct amdgpu_ip_block *ip_block); + +#define AMDGPU_MAX_IP_NUM AMD_IP_BLOCK_TYPE_NUM struct amdgpu_ip_block_status { bool valid; @@ -347,12 +398,10 @@ struct amdgpu_ip_block_version { const struct amd_ip_funcs *funcs; }; -#define HW_REV(_Major, _Minor, _Rev) \ - ((((uint32_t) (_Major)) << 16) | ((uint32_t) (_Minor) << 8) | ((uint32_t) (_Rev))) - struct amdgpu_ip_block { struct amdgpu_ip_block_status status; const struct amdgpu_ip_block_version *version; + struct amdgpu_device *adev; }; int amdgpu_device_ip_block_version_cmp(struct amdgpu_device *adev, @@ -371,7 +420,9 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev, */ bool amdgpu_get_bios(struct amdgpu_device *adev); bool amdgpu_read_bios(struct amdgpu_device *adev); - +bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev, + u8 *bios, u32 length_bytes); +void amdgpu_bios_release(struct amdgpu_device *adev); /* * Clocks */ @@ -386,7 +437,6 @@ struct amdgpu_clock { uint32_t default_mclk; uint32_t default_sclk; uint32_t default_dispclk; - uint32_t current_dispclk; uint32_t dp_extclk; uint32_t max_pixel_clock; }; @@ -415,34 +465,13 @@ struct amdgpu_clock { * alignment). 
*/ -#define AMDGPU_SA_NUM_FENCE_LISTS 32 - struct amdgpu_sa_manager { - wait_queue_head_t wq; - struct amdgpu_bo *bo; - struct list_head *hole; - struct list_head flist[AMDGPU_SA_NUM_FENCE_LISTS]; - struct list_head olist; - unsigned size; - uint64_t gpu_addr; - void *cpu_ptr; - uint32_t domain; - uint32_t align; -}; - -/* sub-allocation buffer */ -struct amdgpu_sa_bo { - struct list_head olist; - struct list_head flist; - struct amdgpu_sa_manager *manager; - unsigned soffset; - unsigned eoffset; - struct dma_fence *fence; + struct drm_suballoc_manager base; + struct amdgpu_bo *bo; + uint64_t gpu_addr; + void *cpu_ptr; }; -int amdgpu_fence_slab_init(void); -void amdgpu_fence_slab_fini(void); - /* * IRQS. */ @@ -456,28 +485,12 @@ struct amdgpu_flip_work { uint64_t base; struct drm_pending_vblank_event *event; struct amdgpu_bo *old_abo; - struct dma_fence *excl; unsigned shared_count; struct dma_fence **shared; struct dma_fence_cb cb; bool async; }; - -/* - * CP & rings. - */ - -struct amdgpu_ib { - struct amdgpu_sa_bo *sa_bo; - uint32_t length_dw; - uint64_t gpu_addr; - uint32_t *ptr; - uint32_t flags; -}; - -extern const struct drm_sched_backend_ops amdgpu_sched_ops; - /* * file private structure */ @@ -486,97 +499,83 @@ struct amdgpu_fpriv { struct amdgpu_vm vm; struct amdgpu_bo_va *prt_va; struct amdgpu_bo_va *csa_va; + struct amdgpu_bo_va *seq64_va; struct mutex bo_list_lock; struct idr bo_list_handles; struct amdgpu_ctx_mgr ctx_mgr; -}; - -int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); - -int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, - unsigned size, - enum amdgpu_ib_pool_type pool, - struct amdgpu_ib *ib); -void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, - struct dma_fence *f); -int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, - struct amdgpu_ib *ibs, struct amdgpu_job *job, - struct dma_fence **f); -int amdgpu_ib_pool_init(struct amdgpu_device *adev); -void amdgpu_ib_pool_fini(struct amdgpu_device *adev); -int amdgpu_ib_ring_tests(struct amdgpu_device *adev); - -/* - * CS. 
- */ -struct amdgpu_cs_chunk { - uint32_t chunk_id; - uint32_t length_dw; - void *kdata; -}; + struct amdgpu_userq_mgr userq_mgr; -struct amdgpu_cs_post_dep { - struct drm_syncobj *syncobj; - struct dma_fence_chain *chain; - u64 point; -}; + /* Eviction fence infra */ + struct amdgpu_eviction_fence_mgr evf_mgr; -struct amdgpu_cs_parser { - struct amdgpu_device *adev; - struct drm_file *filp; - struct amdgpu_ctx *ctx; - - /* chunks */ - unsigned nchunks; - struct amdgpu_cs_chunk *chunks; - - /* scheduler job object */ - struct amdgpu_job *job; - struct drm_sched_entity *entity; - - /* buffer objects */ - struct ww_acquire_ctx ticket; - struct amdgpu_bo_list *bo_list; - struct amdgpu_mn *mn; - struct amdgpu_bo_list_entry vm_pd; - struct list_head validated; - struct dma_fence *fence; - uint64_t bytes_moved_threshold; - uint64_t bytes_moved_vis_threshold; - uint64_t bytes_moved; - uint64_t bytes_moved_vis; - - /* user fence */ - struct amdgpu_bo_list_entry uf_entry; - - unsigned num_post_deps; - struct amdgpu_cs_post_dep *post_deps; + /** GPU partition selection */ + uint32_t xcp_id; }; -static inline u32 amdgpu_get_ib_value(struct amdgpu_cs_parser *p, - uint32_t ib_idx, int idx) -{ - return p->job->ibs[ib_idx].ptr[idx]; -} - -static inline void amdgpu_set_ib_value(struct amdgpu_cs_parser *p, - uint32_t ib_idx, int idx, - uint32_t value) -{ - p->job->ibs[ib_idx].ptr[idx] = value; -} +int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv); /* * Writeback */ -#define AMDGPU_MAX_WB 256 /* Reserve at most 256 WB slots for amdgpu-owned rings. */ +#define AMDGPU_MAX_WB 1024 /* Reserve at most 1024 WB slots for amdgpu-owned rings. */ +/** + * amdgpu_wb - This struct is used for small GPU memory allocation. + * + * This struct is used to allocate a small amount of GPU memory that can be + * used to shadow certain states into the memory. This is especially useful for + * providing easy CPU access to some states without requiring register access + * (e.g., if some block is power gated, reading register may be problematic). + * + * Note: the term writeback was initially used because many of the amdgpu + * components had some level of writeback memory, and this struct initially + * described those components. + */ struct amdgpu_wb { + + /** + * @wb_obj: + * + * Buffer Object used for the writeback memory. + */ struct amdgpu_bo *wb_obj; - volatile uint32_t *wb; + + /** + * @wb: + * + * Pointer to the first writeback slot. In terms of CPU address + * this value can be accessed directly by using the offset as an index. + * For the GPU address, it is necessary to use gpu_addr and the offset. + */ + uint32_t *wb; + + /** + * @gpu_addr: + * + * Writeback base address in the GPU. + */ uint64_t gpu_addr; - u32 num_wb; /* Number of wb slots actually reserved for amdgpu. */ + + /** + * @num_wb: + * + * Number of writeback slots reserved for amdgpu. + */ + u32 num_wb; + + /** + * @used: + * + * Track the writeback slot already used. + */ unsigned long used[DIV_ROUND_UP(AMDGPU_MAX_WB, BITS_PER_LONG)]; + + /** + * @lock: + * + * Protects read and write of the used field array. 
+ */ + spinlock_t lock; }; int amdgpu_device_wb_get(struct amdgpu_device *adev, u32 *wb); @@ -585,13 +584,7 @@ void amdgpu_device_wb_free(struct amdgpu_device *adev, u32 wb); /* * Benchmarking */ -void amdgpu_benchmark(struct amdgpu_device *adev, int test_number); - - -/* - * Testing - */ -void amdgpu_test_moves(struct amdgpu_device *adev); +int amdgpu_benchmark(struct amdgpu_device *adev, int test_number); /* * ASIC specific register table accessible by UMD @@ -601,14 +594,42 @@ struct amdgpu_allowed_register_entry { bool grbm_indexed; }; +/** + * enum amd_reset_method - Methods for resetting AMD GPU devices + * + * @AMD_RESET_METHOD_NONE: The device will not be reset. + * @AMD_RESET_LEGACY: Method reserved for SI, CIK and VI ASICs. + * @AMD_RESET_MODE0: Reset the entire ASIC. Not currently available for the + * any device. + * @AMD_RESET_MODE1: Resets all IP blocks on the ASIC (SDMA, GFX, VCN, etc.) + * individually. Suitable only for some discrete GPU, not + * available for all ASICs. + * @AMD_RESET_MODE2: Resets a lesser level of IPs compared to MODE1. Which IPs + * are reset depends on the ASIC. Notably doesn't reset IPs + * shared with the CPU on APUs or the memory controllers (so + * VRAM is not lost). Not available on all ASICs. + * @AMD_RESET_LINK: Triggers SW-UP link reset on other GPUs + * @AMD_RESET_BACO: BACO (Bus Alive, Chip Off) method powers off and on the card + * but without powering off the PCI bus. Suitable only for + * discrete GPUs. + * @AMD_RESET_PCI: Does a full bus reset using core Linux subsystem PCI reset + * and does a secondary bus reset or FLR, depending on what the + * underlying hardware supports. + * + * Methods available for AMD GPU driver for resetting the device. Not all + * methods are suitable for every device. User can override the method using + * module parameter `reset_method`. 
+ */ enum amd_reset_method { AMD_RESET_METHOD_NONE = -1, AMD_RESET_METHOD_LEGACY = 0, AMD_RESET_METHOD_MODE0, AMD_RESET_METHOD_MODE1, AMD_RESET_METHOD_MODE2, + AMD_RESET_METHOD_LINK, AMD_RESET_METHOD_BACO, AMD_RESET_METHOD_PCI, + AMD_RESET_METHOD_ON_INIT, }; struct amdgpu_video_codec_info { @@ -619,6 +640,13 @@ struct amdgpu_video_codec_info { u32 max_level; }; +#define codec_info_build(type, width, height, level) \ + .codec_type = type,\ + .max_width = width,\ + .max_height = height,\ + .max_pixels_per_frame = height * width,\ + .max_level = level, + struct amdgpu_video_codecs { const u32 codec_count; const struct amdgpu_video_codec_info *codec_array; @@ -663,7 +691,7 @@ struct amdgpu_asic_funcs { /* PCIe replay counter */ uint64_t (*get_pcie_replay_count)(struct amdgpu_device *adev); /* device supports BACO */ - bool (*supports_baco)(struct amdgpu_device *adev); + int (*supports_baco)(struct amdgpu_device *adev); /* pre asic_init quirks */ void (*pre_asic_init)(struct amdgpu_device *adev); /* enter/exit umd stable pstate */ @@ -671,6 +699,12 @@ struct amdgpu_asic_funcs { /* query video codecs */ int (*query_video_codecs)(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs); + /* encode "> 32bits" smn addressing */ + u64 (*encode_ext_smn_addressing)(int ext_id); + + ssize_t (*get_reg_state)(struct amdgpu_device *adev, + enum amdgpu_reg_state reg_state, void *buf, + size_t max_size); }; /* @@ -687,9 +721,9 @@ int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); /* VRAM scratch page for HDP bug, default vram page */ -struct amdgpu_vram_scratch { +struct amdgpu_mem_scratch { struct amdgpu_bo *robj; - volatile uint32_t *ptr; + uint32_t *ptr; u64 gpu_addr; }; @@ -705,15 +739,22 @@ void amdgpu_cgs_destroy_device(struct cgs_device *cgs_device); typedef uint32_t (*amdgpu_rreg_t)(struct amdgpu_device*, uint32_t); typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t); +typedef uint32_t (*amdgpu_rreg_ext_t)(struct amdgpu_device*, uint64_t); +typedef void (*amdgpu_wreg_ext_t)(struct amdgpu_device*, uint64_t, uint32_t); + typedef uint64_t (*amdgpu_rreg64_t)(struct amdgpu_device*, uint32_t); typedef void (*amdgpu_wreg64_t)(struct amdgpu_device*, uint32_t, uint64_t); +typedef uint64_t (*amdgpu_rreg64_ext_t)(struct amdgpu_device*, uint64_t); +typedef void (*amdgpu_wreg64_ext_t)(struct amdgpu_device*, uint64_t, uint64_t); + typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); struct amdgpu_mmio_remap { u32 reg_offset; resource_size_t bus_addr; + struct amdgpu_bo *bo; }; /* Define the HW IP blocks will be used in driver , add more if necessary */ @@ -728,6 +769,7 @@ enum amd_hw_ip_block_type { SDMA5_HWIP, SDMA6_HWIP, SDMA7_HWIP, + LSDMA_HWIP, MMHUB_HWIP, ATHUB_HWIP, NBIO_HWIP, @@ -736,7 +778,9 @@ enum amd_hw_ip_block_type { UVD_HWIP, VCN_HWIP = UVD_HWIP, JPEG_HWIP = VCN_HWIP, + VCN1_HWIP, VCE_HWIP, + VPE_HWIP, DF_HWIP, DCE_HWIP, OSSSYS_HWIP, @@ -747,10 +791,50 @@ enum amd_hw_ip_block_type { CLK_HWIP, UMC_HWIP, RSMU_HWIP, + XGMI_HWIP, + DCI_HWIP, + PCIE_HWIP, + ISP_HWIP, MAX_HWIP }; -#define HWIP_MAX_INSTANCE 8 +#define HWIP_MAX_INSTANCE 44 + +#define HW_ID_MAX 300 +#define IP_VERSION_FULL(mj, mn, rv, var, srev) \ + (((mj) << 24) | ((mn) << 16) | ((rv) << 8) | ((var) << 4) | (srev)) +#define IP_VERSION(mj, mn, rv) IP_VERSION_FULL(mj, mn, rv, 0, 0) +#define IP_VERSION_MAJ(ver) ((ver) >> 24) +#define 
IP_VERSION_MIN(ver) (((ver) >> 16) & 0xFF) +#define IP_VERSION_REV(ver) (((ver) >> 8) & 0xFF) +#define IP_VERSION_VARIANT(ver) (((ver) >> 4) & 0xF) +#define IP_VERSION_SUBREV(ver) ((ver) & 0xF) +#define IP_VERSION_MAJ_MIN_REV(ver) ((ver) >> 8) + +struct amdgpu_ip_map_info { + /* Map of logical to actual dev instances/mask */ + uint32_t dev_inst[MAX_HWIP][HWIP_MAX_INSTANCE]; + int8_t (*logical_to_dev_inst)(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + int8_t inst); + uint32_t (*logical_to_dev_mask)(struct amdgpu_device *adev, + enum amd_hw_ip_block_type block, + uint32_t mask); +}; + +enum amdgpu_uid_type { + AMDGPU_UID_TYPE_XCD, + AMDGPU_UID_TYPE_AID, + AMDGPU_UID_TYPE_SOC, + AMDGPU_UID_TYPE_MAX +}; + +#define AMDGPU_UID_INST_MAX 8 /* max number of instances for each UID type */ + +struct amdgpu_uid { + uint64_t uid[AMDGPU_UID_TYPE_MAX][AMDGPU_UID_INST_MAX]; + struct amdgpu_device *adev; +}; struct amd_powerplay { void *pp_handle; @@ -796,8 +880,73 @@ struct amd_powerplay { (rid == 0x01) || \ (rid == 0x10)))) +struct amdgpu_mqd_prop { + uint64_t mqd_gpu_addr; + uint64_t hqd_base_gpu_addr; + uint64_t rptr_gpu_addr; + uint64_t wptr_gpu_addr; + uint32_t queue_size; + bool use_doorbell; + uint32_t doorbell_index; + uint64_t eop_gpu_addr; + uint32_t hqd_pipe_priority; + uint32_t hqd_queue_priority; + bool allow_tunneling; + bool hqd_active; + uint64_t shadow_addr; + uint64_t gds_bkup_addr; + uint64_t csa_addr; + uint64_t fence_address; + bool tmz_queue; + bool kernel_queue; +}; + +struct amdgpu_mqd { + unsigned mqd_size; + int (*init_mqd)(struct amdgpu_device *adev, void *mqd, + struct amdgpu_mqd_prop *p); +}; + +struct amdgpu_pcie_reset_ctx { + bool in_link_reset; + bool occurs_dpc; + bool audio_suspended; + struct pci_dev *swus; + struct pci_saved_state *swus_pcistate; + struct pci_saved_state *swds_pcistate; +}; + +/* + * Custom Init levels could be defined for different situations where a full + * initialization of all hardware blocks are not expected. Sample cases are + * custom init sequences after resume after S0i3/S3, reset on initialization, + * partial reset of blocks etc. Presently, this defines only two levels. Levels + * are described in corresponding struct definitions - amdgpu_init_default, + * amdgpu_init_minimal_xgmi. + */ +enum amdgpu_init_lvl_id { + AMDGPU_INIT_LEVEL_DEFAULT, + AMDGPU_INIT_LEVEL_MINIMAL_XGMI, + AMDGPU_INIT_LEVEL_RESET_RECOVERY, +}; + +struct amdgpu_init_level { + enum amdgpu_init_lvl_id level; + uint32_t hwini_ip_block_mask; +}; + #define AMDGPU_RESET_MAGIC_NUM 64 #define AMDGPU_MAX_DF_PERFMONS 4 +struct amdgpu_reset_domain; +struct amdgpu_fru_info; + +enum amdgpu_enforce_isolation_mode { + AMDGPU_ENFORCE_ISOLATION_DISABLE = 0, + AMDGPU_ENFORCE_ISOLATION_ENABLE = 1, + AMDGPU_ENFORCE_ISOLATION_ENABLE_LEGACY = 2, + AMDGPU_ENFORCE_ISOLATION_NO_CLEANER_SHADER = 3, +}; + struct amdgpu_device { struct device *dev; struct pci_dev *pdev; @@ -807,6 +956,7 @@ struct amdgpu_device { struct amdgpu_acp acp; #endif struct amdgpu_hive_info *hive; + struct amdgpu_xcp_mgr *xcp_mgr; /* ASIC */ enum amd_asic_type asic_type; uint32_t family; @@ -820,8 +970,9 @@ struct amdgpu_device { bool need_swiotlb; bool accel_working; struct notifier_block acpi_nb; + struct notifier_block pm_nb; struct amdgpu_i2c_chan *i2c_bus[AMDGPU_MAX_I2C_BUS]; - struct debugfs_blob_wrapper debugfs_vbios_blob; + struct debugfs_blob_wrapper debugfs_vbios_blob; struct mutex srbm_mutex; /* GRBM index mutex. 
Protects concurrent access to GRBM index */ struct mutex grbm_idx_mutex; @@ -853,8 +1004,12 @@ struct amdgpu_device { amdgpu_wreg_t pcie_wreg; amdgpu_rreg_t pciep_rreg; amdgpu_wreg_t pciep_wreg; + amdgpu_rreg_ext_t pcie_rreg_ext; + amdgpu_wreg_ext_t pcie_wreg_ext; amdgpu_rreg64_t pcie_rreg64; amdgpu_wreg64_t pcie_wreg64; + amdgpu_rreg64_ext_t pcie_rreg64_ext; + amdgpu_wreg64_ext_t pcie_wreg64_ext; /* protects concurrent UVD register access */ spinlock_t uvd_ctx_idx_lock; amdgpu_rreg_t uvd_ctx_rreg; @@ -886,11 +1041,11 @@ struct amdgpu_device { dma_addr_t dummy_page_addr; struct amdgpu_vm_manager vm_manager; struct amdgpu_vmhub vmhub[AMDGPU_MAX_VMHUBS]; - unsigned num_vmhubs; + DECLARE_BITMAP(vmhubs_mask, AMDGPU_MAX_VMHUBS); /* memory management */ struct amdgpu_mman mman; - struct amdgpu_vram_scratch vram_scratch; + struct amdgpu_mem_scratch mem_scratch; struct amdgpu_wb wb; atomic64_t num_bytes_moved; atomic64_t num_evictions; @@ -907,11 +1062,15 @@ struct amdgpu_device { u32 log2_max_MBps; } mm_stats; + /* discovery*/ + struct amdgpu_discovery_info discovery; + /* display */ bool enable_virtual_display; + struct amdgpu_vkms_output *amdgpu_vkms_output; struct amdgpu_mode_info mode_info; /* For pre-DCE11. DCE11 and later are in "struct amdgpu_device->dm" */ - struct work_struct hotplug_work; + struct delayed_work hotplug_work; struct amdgpu_irq_src crtc_irq; struct amdgpu_irq_src vline0_irq; struct amdgpu_irq_src vupdate_irq; @@ -924,6 +1083,7 @@ struct amdgpu_device { u64 fence_context; unsigned num_rings; struct amdgpu_ring *rings[AMDGPU_MAX_RINGS]; + struct dma_fence __rcu *gang_submit; bool ib_pool_ready; struct amdgpu_sa_manager ib_pools[AMDGPU_IB_POOL_MAX]; struct amdgpu_sched gpu_sched[AMDGPU_HW_IP_NUM][AMDGPU_RING_PRIO_MAX]; @@ -933,14 +1093,8 @@ struct amdgpu_device { /* powerplay */ struct amd_powerplay powerplay; - bool pp_force_state_enabled; - - /* smu */ - struct smu_context smu; - - /* dpm */ struct amdgpu_pm pm; - u32 cg_flags; + u64 cg_flags; u32 pg_flags; /* nbio */ @@ -964,6 +1118,9 @@ struct amdgpu_device { /* sdma */ struct amdgpu_sdma sdma; + /* lsdma */ + struct amdgpu_lsdma lsdma; + /* uvd */ struct amdgpu_uvd uvd; @@ -976,6 +1133,13 @@ struct amdgpu_device { /* jpeg */ struct amdgpu_jpeg jpeg; + /* vpe */ + struct amdgpu_vpe vpe; + + /* umsch */ + struct amdgpu_umsch_mm umsch_mm; + bool enable_umsch_mm; + /* firmwares */ struct amdgpu_firmware firmware; @@ -985,8 +1149,8 @@ struct amdgpu_device { /* GDS */ struct amdgpu_gds gds; - /* KFD */ - struct amdgpu_kfd_dev kfd; + /* for userq and VM fences */ + struct amdgpu_seq64 seq64; /* UMC */ struct amdgpu_umc umc; @@ -994,13 +1158,43 @@ struct amdgpu_device { /* display related functionality */ struct amdgpu_display_manager dm; +#if defined(CONFIG_DRM_AMD_ISP) + /* isp */ + struct amdgpu_isp isp; +#endif + /* mes */ bool enable_mes; + bool enable_mes_kiq; + bool enable_uni_mes; struct amdgpu_mes mes; + struct amdgpu_mqd mqds[AMDGPU_HW_IP_NUM]; + const struct amdgpu_userq_funcs *userq_funcs[AMDGPU_HW_IP_NUM]; + + /* xarray used to retrieve the user queue fence driver reference + * in the EOP interrupt handler to signal the particular user + * queue fence. 
+ */ + struct xarray userq_xa; + /** + * @userq_doorbell_xa: Global user queue map (doorbell index → queue) + * Key: doorbell_index (unique global identifier for the queue) + * Value: struct amdgpu_usermode_queue + */ + struct xarray userq_doorbell_xa; /* df */ struct amdgpu_df df; + /* MCA */ + struct amdgpu_mca mca; + + /* ACA */ + struct amdgpu_aca aca; + + /* CPER */ + struct amdgpu_cper cper; + struct amdgpu_ip_block ip_blocks[AMDGPU_MAX_IP_NUM]; uint32_t harvest_ip_mask; int num_ip_blocks; @@ -1014,16 +1208,13 @@ struct amdgpu_device { /* soc15 register offset based on ip, instance and segment */ uint32_t *reg_offset[MAX_HWIP][HWIP_MAX_INSTANCE]; + struct amdgpu_ip_map_info ip_map; /* delayed work_func for deferring clockgating during resume */ struct delayed_work delayed_init_work; struct amdgpu_virt virt; - /* link all shadow bo */ - struct list_head shadow_list; - struct mutex shadow_list_lock; - /* record hw reset is performed */ bool has_hw_reset; u8 reset_magic[AMDGPU_RESET_MAGIC_NUM]; @@ -1033,10 +1224,9 @@ struct amdgpu_device { bool in_s3; bool in_s4; bool in_s0ix; + suspend_state_t last_suspend_state; - atomic_t in_gpu_reset; enum pp_mp1_state mp1_state; - struct rw_semaphore reset_sem; struct amdgpu_doorbell_index doorbell_index; struct mutex notifier_lock; @@ -1049,36 +1239,108 @@ struct amdgpu_device { long sdma_timeout; long video_timeout; long compute_timeout; + long psp_timeout; uint64_t unique_id; uint64_t df_perfmon_config_assign_mask[AMDGPU_MAX_DF_PERFMONS]; /* enable runtime pm on the device */ - bool runpm; bool in_runpm; bool has_pr3; - bool pm_sysfs_en; bool ucode_sysfs_en; - /* Chip product information */ - char product_number[16]; - char product_name[32]; - char serial[20]; - - struct amdgpu_autodump autodump; - + struct amdgpu_fru_info *fru_info; atomic_t throttling_logging_enabled; struct ratelimit_state throttling_logging_rs; uint32_t ras_hw_enabled; uint32_t ras_enabled; + bool ras_default_ecc_enabled; bool no_hw_access; struct pci_saved_state *pci_state; + pci_channel_state_t pci_channel_state; + + struct amdgpu_pcie_reset_ctx pcie_reset_ctx; + + /* Track auto wait count on s_barrier settings */ + bool barrier_has_auto_waitcnt; struct amdgpu_reset_control *reset_cntl; + uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE]; + + bool ram_is_direct_mapped; + + struct list_head ras_list; + + struct amdgpu_reset_domain *reset_domain; + + struct mutex benchmark_mutex; + + bool scpm_enabled; + uint32_t scpm_status; + + struct work_struct reset_work; + + bool dc_enabled; + /* Mask of active clusters */ + uint32_t aid_mask; + + /* Debug */ + bool debug_vm; + bool debug_largebar; + bool debug_disable_soft_recovery; + bool debug_use_vram_fw_buf; + bool debug_enable_ras_aca; + bool debug_exp_resets; + bool debug_disable_gpu_ring_reset; + bool debug_vm_userptr; + bool debug_disable_ce_logs; + bool debug_enable_ce_cs; + + /* Protection for the following isolation structure */ + struct mutex enforce_isolation_mutex; + enum amdgpu_enforce_isolation_mode enforce_isolation[MAX_XCP]; + struct amdgpu_isolation { + void *owner; + struct dma_fence *spearhead; + struct amdgpu_sync active; + struct amdgpu_sync prev; + } isolation[MAX_XCP]; + + struct amdgpu_init_level *init_lvl; + + /* This flag is used to determine how VRAM allocations are handled for APUs + * in KFD: VRAM or GTT. 
+ */ + bool apu_prefer_gtt; + + bool userq_halt_for_enforce_isolation; + struct work_struct userq_reset_work; + struct amdgpu_uid *uid_info; + + /* KFD + * Must be last --ends in a flexible-array member. + */ + struct amdgpu_kfd_dev kfd; }; +static inline uint32_t amdgpu_ip_version(const struct amdgpu_device *adev, + uint8_t ip, uint8_t inst) +{ + /* This considers only major/minor/rev and ignores + * subrevision/variant fields. + */ + return adev->ip_versions[ip][inst] & ~0xFFU; +} + +static inline uint32_t amdgpu_ip_version_full(const struct amdgpu_device *adev, + uint8_t ip, uint8_t inst) +{ + /* This returns full version - major/minor/rev/variant/subrevision */ + return adev->ip_versions[ip][inst]; +} + static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) { return container_of(ddev, struct amdgpu_device, ddev); @@ -1094,6 +1356,11 @@ static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev) return container_of(bdev, struct amdgpu_device, mman.bdev); } +static inline bool amdgpu_is_multi_aid(struct amdgpu_device *adev) +{ + return !!adev->aid_mask; +} + int amdgpu_device_init(struct amdgpu_device *adev, uint32_t flags); void amdgpu_device_fini_hw(struct amdgpu_device *adev); @@ -1101,40 +1368,64 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev); int amdgpu_gpu_wait_for_idle(struct amdgpu_device *adev); +void amdgpu_device_mm_access(struct amdgpu_device *adev, loff_t pos, + void *buf, size_t size, bool write); +size_t amdgpu_device_aper_access(struct amdgpu_device *adev, loff_t pos, + void *buf, size_t size, bool write); + void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos, - uint32_t *buf, size_t size, bool write); + void *buf, size_t size, bool write); +uint32_t amdgpu_device_wait_on_rreg(struct amdgpu_device *adev, + uint32_t inst, uint32_t reg_addr, char reg_name[], + uint32_t expected_value, uint32_t mask); uint32_t amdgpu_device_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t acc_flags); +u32 amdgpu_device_indirect_rreg_ext(struct amdgpu_device *adev, + u64 reg_addr); +uint32_t amdgpu_device_xcc_rreg(struct amdgpu_device *adev, + uint32_t reg, uint32_t acc_flags, + uint32_t xcc_id); void amdgpu_device_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags); +void amdgpu_device_indirect_wreg_ext(struct amdgpu_device *adev, + u64 reg_addr, u32 reg_data); +void amdgpu_device_xcc_wreg(struct amdgpu_device *adev, + uint32_t reg, uint32_t v, + uint32_t acc_flags, + uint32_t xcc_id); void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, - uint32_t reg, uint32_t v); + uint32_t reg, uint32_t v, uint32_t xcc_id); void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value); uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset); u32 amdgpu_device_indirect_rreg(struct amdgpu_device *adev, - u32 pcie_index, u32 pcie_data, u32 reg_addr); u64 amdgpu_device_indirect_rreg64(struct amdgpu_device *adev, - u32 pcie_index, u32 pcie_data, u32 reg_addr); +u64 amdgpu_device_indirect_rreg64_ext(struct amdgpu_device *adev, + u64 reg_addr); void amdgpu_device_indirect_wreg(struct amdgpu_device *adev, - u32 pcie_index, u32 pcie_data, u32 reg_addr, u32 reg_data); void amdgpu_device_indirect_wreg64(struct amdgpu_device *adev, - u32 pcie_index, u32 pcie_data, u32 reg_addr, u64 reg_data); - -bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type); +void amdgpu_device_indirect_wreg64_ext(struct amdgpu_device *adev, + u64 reg_addr, u64 reg_data); +u32 
amdgpu_device_get_rev_id(struct amdgpu_device *adev); +bool amdgpu_device_asic_has_dc_support(struct pci_dev *pdev, + enum amd_asic_type asic_type); bool amdgpu_device_has_dc_support(struct amdgpu_device *adev); +void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev); + int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev, struct amdgpu_reset_context *reset_context); int amdgpu_do_asic_reset(struct list_head *device_list_handle, struct amdgpu_reset_context *reset_context); +int amdgpu_device_reinit_after_reset(struct amdgpu_reset_context *reset_context); + int emu_soc_asic_init(struct amdgpu_device *adev); /* @@ -1146,8 +1437,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define RREG32_NO_KIQ(reg) amdgpu_device_rreg(adev, (reg), AMDGPU_REGS_NO_KIQ) #define WREG32_NO_KIQ(reg, v) amdgpu_device_wreg(adev, (reg), (v), AMDGPU_REGS_NO_KIQ) -#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg)) -#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v)) +#define RREG32_KIQ(reg) amdgpu_kiq_rreg(adev, (reg), 0) +#define WREG32_KIQ(reg, v) amdgpu_kiq_wreg(adev, (reg), (v), 0) #define RREG8(reg) amdgpu_mm_rreg8(adev, (reg)) #define WREG8(reg, v) amdgpu_mm_wreg8(adev, (reg), (v)) @@ -1157,12 +1448,18 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define WREG32(reg, v) amdgpu_device_wreg(adev, (reg), (v), 0) #define REG_SET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) #define REG_GET(FIELD, v) (((v) << FIELD##_SHIFT) & FIELD##_MASK) +#define RREG32_XCC(reg, inst) amdgpu_device_xcc_rreg(adev, (reg), 0, inst) +#define WREG32_XCC(reg, v, inst) amdgpu_device_xcc_wreg(adev, (reg), (v), 0, inst) #define RREG32_PCIE(reg) adev->pcie_rreg(adev, (reg)) #define WREG32_PCIE(reg, v) adev->pcie_wreg(adev, (reg), (v)) #define RREG32_PCIE_PORT(reg) adev->pciep_rreg(adev, (reg)) #define WREG32_PCIE_PORT(reg, v) adev->pciep_wreg(adev, (reg), (v)) +#define RREG32_PCIE_EXT(reg) adev->pcie_rreg_ext(adev, (reg)) +#define WREG32_PCIE_EXT(reg, v) adev->pcie_wreg_ext(adev, (reg), (v)) #define RREG64_PCIE(reg) adev->pcie_rreg64(adev, (reg)) #define WREG64_PCIE(reg, v) adev->pcie_wreg64(adev, (reg), (v)) +#define RREG64_PCIE_EXT(reg) adev->pcie_rreg64_ext(adev, (reg)) +#define WREG64_PCIE_EXT(reg, v) adev->pcie_wreg64_ext(adev, (reg), (v)) #define RREG32_SMC(reg) adev->smc_rreg(adev, (reg)) #define WREG32_SMC(reg, v) adev->smc_wreg(adev, (reg), (v)) #define RREG32_UVD_CTX(reg) adev->uvd_ctx_rreg(adev, (reg)) @@ -1218,6 +1515,7 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define WREG32_FIELD_OFFSET(reg, offset, field, val) \ WREG32(mm##reg + offset, (RREG32(mm##reg + offset) & ~REG_FIELD_MASK(reg, field)) | (val) << REG_FIELD_SHIFT(reg, field)) +#define AMDGPU_GET_REG_FIELD(x, h, l) (((x) & GENMASK_ULL(h, l)) >> (l)) /* * BIOS helpers. */ @@ -1228,7 +1526,8 @@ int emu_soc_asic_init(struct amdgpu_device *adev); /* * ASICs macro. */ -#define amdgpu_asic_set_vga_state(adev, state) (adev)->asic_funcs->set_vga_state((adev), (state)) +#define amdgpu_asic_set_vga_state(adev, state) \ + ((adev)->asic_funcs->set_vga_state ? 
(adev)->asic_funcs->set_vga_state((adev), (state)) : 0) #define amdgpu_asic_reset(adev) (adev)->asic_funcs->reset((adev)) #define amdgpu_asic_reset_method(adev) (adev)->asic_funcs->reset_method((adev)) #define amdgpu_asic_get_xclk(adev) (adev)->asic_funcs->get_xclk((adev)) @@ -1241,10 +1540,6 @@ int emu_soc_asic_init(struct amdgpu_device *adev); #define amdgpu_asic_read_bios_from_rom(adev, b, l) (adev)->asic_funcs->read_bios_from_rom((adev), (b), (l)) #define amdgpu_asic_read_register(adev, se, sh, offset, v)((adev)->asic_funcs->read_register((adev), (se), (sh), (offset), (v))) #define amdgpu_asic_get_config_memsize(adev) (adev)->asic_funcs->get_config_memsize((adev)) -#define amdgpu_asic_flush_hdp(adev, r) \ - ((adev)->asic_funcs->flush_hdp ? (adev)->asic_funcs->flush_hdp((adev), (r)) : (adev)->hdp.funcs->flush_hdp((adev), (r))) -#define amdgpu_asic_invalidate_hdp(adev, r) \ - ((adev)->asic_funcs->invalidate_hdp ? (adev)->asic_funcs->invalidate_hdp((adev), (r)) : (adev)->hdp.funcs->invalidate_hdp((adev), (r))) #define amdgpu_asic_need_full_reset(adev) (adev)->asic_funcs->need_full_reset((adev)) #define amdgpu_asic_init_doorbell_index(adev) (adev)->asic_funcs->init_doorbell_index((adev)) #define amdgpu_asic_get_pcie_usage(adev, cnt0, cnt1) ((adev)->asic_funcs->get_pcie_usage((adev), (cnt0), (cnt1))) @@ -1256,16 +1551,24 @@ int emu_soc_asic_init(struct amdgpu_device *adev); ((adev)->asic_funcs->update_umd_stable_pstate ? (adev)->asic_funcs->update_umd_stable_pstate((adev), (enter)) : 0) #define amdgpu_asic_query_video_codecs(adev, e, c) (adev)->asic_funcs->query_video_codecs((adev), (e), (c)) -#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter)); +#define amdgpu_inc_vram_lost(adev) atomic_inc(&((adev)->vram_lost_counter)) + +#define BIT_MASK_UPPER(i) ((i) >= BITS_PER_LONG ? 
0 : ~0UL << (i)) +#define for_each_inst(i, inst_mask) \ + for (i = ffs(inst_mask); i-- != 0; \ + i = ffs(inst_mask & BIT_MASK_UPPER(i + 1))) /* Common functions */ bool amdgpu_device_has_job_running(struct amdgpu_device *adev); bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev); int amdgpu_device_gpu_recover(struct amdgpu_device *adev, - struct amdgpu_job* job); + struct amdgpu_job *job, + struct amdgpu_reset_context *reset_context); void amdgpu_device_pci_config_reset(struct amdgpu_device *adev); int amdgpu_device_pci_reset(struct amdgpu_device *adev); bool amdgpu_device_need_post(struct amdgpu_device *adev); +bool amdgpu_device_seamless_boot_supported(struct amdgpu_device *adev); +bool amdgpu_device_should_use_aspm(struct amdgpu_device *adev); void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes, u64 num_vis_bytes); @@ -1275,44 +1578,53 @@ void amdgpu_device_program_register_sequence(struct amdgpu_device *adev, const u32 array_size); int amdgpu_device_mode1_reset(struct amdgpu_device *adev); -bool amdgpu_device_supports_atpx(struct drm_device *dev); -bool amdgpu_device_supports_px(struct drm_device *dev); -bool amdgpu_device_supports_boco(struct drm_device *dev); -bool amdgpu_device_supports_smart_shift(struct drm_device *dev); -bool amdgpu_device_supports_baco(struct drm_device *dev); +int amdgpu_device_link_reset(struct amdgpu_device *adev); +bool amdgpu_device_supports_atpx(struct amdgpu_device *adev); +bool amdgpu_device_supports_px(struct amdgpu_device *adev); +bool amdgpu_device_supports_boco(struct amdgpu_device *adev); +bool amdgpu_device_supports_smart_shift(struct amdgpu_device *adev); +int amdgpu_device_supports_baco(struct amdgpu_device *adev); +void amdgpu_device_detect_runtime_pm_mode(struct amdgpu_device *adev); bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev, struct amdgpu_device *peer_adev); -int amdgpu_device_baco_enter(struct drm_device *dev); -int amdgpu_device_baco_exit(struct drm_device *dev); +int amdgpu_device_baco_enter(struct amdgpu_device *adev); +int amdgpu_device_baco_exit(struct amdgpu_device *adev); void amdgpu_device_flush_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring); void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring); +void amdgpu_device_halt(struct amdgpu_device *adev); +u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev, + u32 reg); +void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev, + u32 reg, u32 v); +struct dma_fence *amdgpu_device_get_gang(struct amdgpu_device *adev); +struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, + struct dma_fence *gang); +struct dma_fence *amdgpu_device_enforce_isolation(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_job *job); +bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev); +ssize_t amdgpu_get_soft_full_reset_mask(struct amdgpu_ring *ring); +ssize_t amdgpu_show_reset_mask(char *buf, uint32_t supported_reset); + /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) void amdgpu_register_atpx_handler(void); void amdgpu_unregister_atpx_handler(void); bool amdgpu_has_atpx_dgpu_power_cntl(void); bool amdgpu_is_atpx_hybrid(void); -bool amdgpu_atpx_dgpu_req_power_for_displays(void); bool amdgpu_has_atpx(void); #else static inline void amdgpu_register_atpx_handler(void) {} static inline void amdgpu_unregister_atpx_handler(void) {} static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; } static inline bool 
amdgpu_is_atpx_hybrid(void) { return false; } -static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false; } static inline bool amdgpu_has_atpx(void) { return false; } #endif -#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI) -void *amdgpu_atpx_get_dhandle(void); -#else -static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; } -#endif - /* * KMS */ @@ -1321,20 +1633,18 @@ extern const int amdgpu_max_kms_ioctl; int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags); void amdgpu_driver_unload_kms(struct drm_device *dev); -void amdgpu_driver_lastclose_kms(struct drm_device *dev); int amdgpu_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv); void amdgpu_driver_postclose_kms(struct drm_device *dev, struct drm_file *file_priv); void amdgpu_driver_release_kms(struct drm_device *dev); -int amdgpu_device_ip_suspend(struct amdgpu_device *adev); +int amdgpu_device_prepare(struct drm_device *dev); +void amdgpu_device_complete(struct drm_device *dev); int amdgpu_device_suspend(struct drm_device *dev, bool fbcon); int amdgpu_device_resume(struct drm_device *dev, bool fbcon); u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc); int amdgpu_enable_vblank_kms(struct drm_crtc *crtc); void amdgpu_disable_vblank_kms(struct drm_crtc *crtc); -long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd, - unsigned long arg); int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); @@ -1359,6 +1669,12 @@ struct amdgpu_afmt_acr amdgpu_afmt_acr(uint32_t clock); /* amdgpu_acpi.c */ +struct amdgpu_numa_info { + uint64_t size; + int pxm; + int nid; +}; + /* ATCS Device/Driver State */ #define AMDGPU_ATCS_PSC_DEV_STATE_D0 0 #define AMDGPU_ATCS_PSC_DEV_STATE_D3_HOT 3 @@ -1374,34 +1690,57 @@ int amdgpu_acpi_pcie_performance_request(struct amdgpu_device *adev, u8 perf_req, bool advertise); int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, u8 dev_state, bool drv_state); -int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state); +int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev, + enum amdgpu_ss ss_state); int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); +int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, u64 *tmr_offset, + u64 *tmr_size); +int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, int xcc_id, + struct amdgpu_numa_info *numa_info); void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); -bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev); +bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev); void amdgpu_acpi_detect(void); +void amdgpu_acpi_release(void); #else static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } +static inline int amdgpu_acpi_get_tmr_info(struct amdgpu_device *adev, + u64 *tmr_offset, u64 *tmr_size) +{ + return -EINVAL; +} +static inline int amdgpu_acpi_get_mem_info(struct amdgpu_device *adev, + int xcc_id, + struct amdgpu_numa_info *numa_info) +{ + return -EINVAL; +} static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } -static inline bool amdgpu_acpi_is_s0ix_supported(struct amdgpu_device *adev) { return false; } +static inline bool amdgpu_acpi_should_gpu_reset(struct amdgpu_device *adev) { return false; } static inline void amdgpu_acpi_detect(void) { } +static inline void amdgpu_acpi_release(void) { } static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } static inline int 
amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, u8 dev_state, bool drv_state) { return 0; } -static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, - enum amdgpu_ss ss_state) { return 0; } +static inline int amdgpu_acpi_smart_shift_update(struct amdgpu_device *adev, + enum amdgpu_ss ss_state) +{ + return 0; +} +static inline void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps) { } #endif -int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, - uint64_t addr, struct amdgpu_bo **bo, - struct amdgpu_bo_va_mapping **mapping); - -#if defined(CONFIG_DRM_AMD_DC) -int amdgpu_dm_display_resume(struct amdgpu_device *adev ); +#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) +bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev); +bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); #else -static inline int amdgpu_dm_display_resume(struct amdgpu_device *adev) { return 0; } +static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } +static inline bool amdgpu_acpi_is_s3_active(struct amdgpu_device *adev) { return false; } #endif +#if defined(CONFIG_DRM_AMD_ISP) +int amdgpu_acpi_get_isp4_dev(struct acpi_device **dev); +#endif void amdgpu_register_gpu_instance(struct amdgpu_device *adev); void amdgpu_unregister_gpu_instance(struct amdgpu_device *adev); @@ -1422,6 +1761,15 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev, int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state); +static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev) +{ + return amdgpu_gpu_recovery != 0 && + adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT && + adev->compute_timeout != MAX_SCHEDULE_TIMEOUT && + adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT && + adev->video_timeout != MAX_SCHEDULE_TIMEOUT; +} + #include "amdgpu_object.h" static inline bool amdgpu_is_tmz(struct amdgpu_device *adev) @@ -1429,8 +1777,32 @@ static inline bool amdgpu_is_tmz(struct amdgpu_device *adev) return adev->gmc.tmz_enabled; } -static inline int amdgpu_in_reset(struct amdgpu_device *adev) +int amdgpu_in_reset(struct amdgpu_device *adev); + +extern const struct attribute_group amdgpu_vram_mgr_attr_group; +extern const struct attribute_group amdgpu_gtt_mgr_attr_group; +extern const struct attribute_group amdgpu_flash_attr_group; + +void amdgpu_set_init_level(struct amdgpu_device *adev, + enum amdgpu_init_lvl_id lvl); + +static inline int amdgpu_device_bus_status_check(struct amdgpu_device *adev) { - return atomic_read(&adev->in_gpu_reset); + u32 status; + int r; + + r = pci_read_config_dword(adev->pdev, PCI_COMMAND, &status); + if (r || PCI_POSSIBLE_ERROR(status)) { + dev_err(adev->dev, "device lost from bus!"); + return -ENODEV; + } + + return 0; } + +void amdgpu_device_set_uid(struct amdgpu_uid *uid_info, + enum amdgpu_uid_type type, uint8_t inst, + uint64_t uid); +uint64_t amdgpu_device_get_uid(struct amdgpu_uid *uid_info, + enum amdgpu_uid_type type, uint8_t inst); #endif |
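
A few usage sketches for APIs introduced or reworked by this patch follow; all of them are illustrative only, written as they might appear in a driver .c file that includes amdgpu.h, and the example_* function names are hypothetical. The first one shows the intended use of the new amdgpu_ip_version()/IP_VERSION() helpers, which gate code paths on the discovered IP version rather than on ASIC type; the VCN_HWIP instance and the 4.0.0 cut-off are chosen for illustration, not taken from this patch:

#include "amdgpu.h"

/*
 * Illustrative only: gate a code path on the VCN IP version reported by IP
 * discovery. amdgpu_ip_version() masks off the variant/subrevision bits, so
 * it compares cleanly against plain IP_VERSION(major, minor, rev) values.
 */
static bool example_vcn_is_v4_or_later(struct amdgpu_device *adev)
{
	return amdgpu_ip_version(adev, VCN_HWIP, 0) >= IP_VERSION(4, 0, 0);
}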
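
The new for_each_inst() macro walks the set bits of an instance mask in ascending order and yields zero-based instance indices. A small sketch under the same assumptions, with a hard-coded mask chosen purely for illustration:

/*
 * Illustrative only: iterate the instances present in a mask. With
 * inst_mask = 0x0b the loop body runs for i = 0, 1 and 3.
 */
static void example_walk_instances(struct amdgpu_device *adev)
{
	uint32_t inst_mask = 0x0b;
	int i;

	for_each_inst(i, inst_mask)
		dev_info(adev->dev, "instance %d present\n", i);
}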
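
The struct amdgpu_wb kerneldoc added above describes each writeback slot as having a CPU view (a plain index into wb[]) and a GPU view (gpu_addr plus the byte offset). A sketch of the usual allocate/derive/free sequence; how the address is handed to hardware is left as a placeholder:

/*
 * Illustrative only: reserve one writeback slot, derive its CPU and GPU
 * views as described by the struct amdgpu_wb kerneldoc, then release it.
 */
static int example_wb_roundtrip(struct amdgpu_device *adev)
{
	u32 index;
	u64 wb_gpu_addr;
	int r;

	r = amdgpu_device_wb_get(adev, &index);
	if (r)
		return r;

	adev->wb.wb[index] = 0;				/* CPU view: plain index */
	wb_gpu_addr = adev->wb.gpu_addr + index * 4;	/* GPU view: byte offset */

	dev_dbg(adev->dev, "wb slot %u at GPU address 0x%llx\n",
		index, wb_gpu_addr);

	/* ...point the hardware at wb_gpu_addr, poll adev->wb.wb[index]... */

	amdgpu_device_wb_free(adev, index);
	return 0;
}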
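
Finally, codec_info_build() supplies the designated initializers for one struct amdgpu_video_codec_info entry, so each array element still needs its own braces. A sketch of how a codec table might be declared; the codec index constants are assumed to come from amdgpu_drm.h and the width/height/level values are illustrative:

/*
 * Illustrative only: build a decode capability table from the new
 * codec_info_build() macro and hook it up to struct amdgpu_video_codecs.
 */
static const struct amdgpu_video_codec_info example_decode_array[] = {
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)},
	{codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)},
};

static const struct amdgpu_video_codecs example_decode_codecs = {
	.codec_count = ARRAY_SIZE(example_decode_array),
	.codec_array = example_decode_array,
};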
